/*
 * numa.c
 *
 * numa: Simulate a NUMA-sensitive workload and measure its NUMA performance
 */

/* For the CLR_() macros */
#include <pthread.h>

#include "../perf.h"
#include "../builtin.h"
#include "../util/util.h"
#include <subcmd/parse-options.h>
#include "../util/cloexec.h"

#include "bench.h"

#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <assert.h>
#include <malloc.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <sys/prctl.h>
#include <sys/types.h>

#include <numa.h>
#include <numaif.h>

/*
 * Regular printout to the terminal, suppressed if -q is specified:
 */
#define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0)

/*
 * Debug printf:
 */
#define dprintf(x...) do { if (g && g->p.show_details >= 1) printf(x); } while (0)

struct thread_data {
        int                     curr_cpu;
        cpu_set_t               bind_cpumask;
        int                     bind_node;
        u8                      *process_data;
        int                     process_nr;
        int                     thread_nr;
        int                     task_nr;
        unsigned int            loops_done;
        u64                     val;
        u64                     runtime_ns;
        u64                     system_time_ns;
        u64                     user_time_ns;
        double                  speed_gbs;
        pthread_mutex_t         *process_lock;
};

/* Parameters set by options: */

struct params {
        /* Startup synchronization: */
        bool                    serialize_startup;

        /* Task hierarchy: */
        int                     nr_proc;
        int                     nr_threads;

        /* Working set sizes: */
        const char              *mb_global_str;
        const char              *mb_proc_str;
        const char              *mb_proc_locked_str;
        const char              *mb_thread_str;

        double                  mb_global;
        double                  mb_proc;
        double                  mb_proc_locked;
        double                  mb_thread;

        /* Access patterns to the working set: */
        bool                    data_reads;
        bool                    data_writes;
        bool                    data_backwards;
        bool                    data_zero_memset;
        bool                    data_rand_walk;
        u32                     nr_loops;
        u32                     nr_secs;
        u32                     sleep_usecs;

        /* Working set initialization: */
        bool                    init_zero;
        bool                    init_random;
        bool                    init_cpu0;

        /* Misc options: */
        int                     show_details;
        int                     run_all;
        int                     thp;

        long                    bytes_global;
        long                    bytes_process;
        long                    bytes_process_locked;
        long                    bytes_thread;

        int                     nr_tasks;
        bool                    show_quiet;

        bool                    show_convergence;
        bool                    measure_convergence;

        int                     perturb_secs;
        int                     nr_cpus;
        int                     nr_nodes;

        /* Affinity options -C and -N: */
        char                    *cpu_list_str;
        char                    *node_list_str;
};


/* Global, read-writable area, accessible to all processes and threads: */

struct global_info {
        u8                      *data;

        pthread_mutex_t         startup_mutex;
        int                     nr_tasks_started;

        pthread_mutex_t         startup_done_mutex;

        pthread_mutex_t         start_work_mutex;
        int                     nr_tasks_working;

        pthread_mutex_t         stop_work_mutex;
        u64                     bytes_done;

        struct thread_data      *threads;

        /* Convergence latency measurement: */
        bool                    all_converged;
        bool                    stop_work;

        int                     print_once;

        struct params           p;
};

static struct global_info       *g = NULL;

static int parse_cpus_opt(const struct option *opt, const char *arg, int unset);
static int parse_nodes_opt(const struct option *opt, const char *arg, int unset);

struct params p0;

static const struct option options[] = {
        OPT_INTEGER('p', "nr_proc"      , &p0.nr_proc,          "number of processes"),
        OPT_INTEGER('t', "nr_threads"   , &p0.nr_threads,       "number of threads per process"),

        OPT_STRING('G', "mb_global"     , &p0.mb_global_str,    "MB", "global  memory (MBs)"),
        OPT_STRING('P', "mb_proc"       , &p0.mb_proc_str,      "MB", "process memory (MBs)"),
        OPT_STRING('L', "mb_proc_locked", &p0.mb_proc_locked_str,"MB", "process serialized/locked memory access (MBs), <= process_memory"),
        OPT_STRING('T', "mb_thread"     , &p0.mb_thread_str,    "MB", "thread  memory (MBs)"),

        OPT_UINTEGER('l', "nr_loops"    , &p0.nr_loops,         "max number of loops to run (default: unlimited)"),
        OPT_UINTEGER('s', "nr_secs"     , &p0.nr_secs,          "max number of seconds to run (default: 5 secs)"),
        OPT_UINTEGER('u', "usleep"      , &p0.sleep_usecs,      "usecs to sleep per loop iteration"),

        OPT_BOOLEAN('R', "data_reads"   , &p0.data_reads,       "access the data via reads (can be mixed with -W)"),
        OPT_BOOLEAN('W', "data_writes"  , &p0.data_writes,      "access the data via writes (can be mixed with -R)"),
        OPT_BOOLEAN('B', "data_backwards", &p0.data_backwards,  "access the data backwards as well"),
        OPT_BOOLEAN('Z', "data_zero_memset", &p0.data_zero_memset,"access the data via glibc bzero only"),
        OPT_BOOLEAN('r', "data_rand_walk", &p0.data_rand_walk,  "access the data with random (32bit LFSR) walk"),


        OPT_BOOLEAN('z', "init_zero"    , &p0.init_zero,        "bzero the initial allocations"),
        OPT_BOOLEAN('I', "init_random"  , &p0.init_random,      "randomize the contents of the initial allocations"),
        OPT_BOOLEAN('0', "init_cpu0"    , &p0.init_cpu0,        "do the initial allocations on CPU#0"),
        OPT_INTEGER('x', "perturb_secs", &p0.perturb_secs,      "perturb thread 0/0 every X secs, to test convergence stability"),

        OPT_INCR   ('d', "show_details" , &p0.show_details,     "Show details"),
        OPT_INCR   ('a', "all"          , &p0.run_all,          "Run all tests in the suite"),
        OPT_INTEGER('H', "thp"          , &p0.thp,              "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"),
        OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details"),
        OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"),
        OPT_BOOLEAN('q', "quiet"        , &p0.show_quiet,       "quiet mode"),
        OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),

        /* Special option string parsing callbacks: */
        OPT_CALLBACK('C', "cpus", NULL, "cpu[,cpu2,...cpuN]",
                        "bind the first N tasks to these specific cpus (the rest is unbound)",
                        parse_cpus_opt),
        OPT_CALLBACK('M', "memnodes", NULL, "node[,node2,...nodeN]",
                        "bind the first N tasks to these specific memory nodes (the rest is unbound)",
                        parse_nodes_opt),
        OPT_END()
};

static const char * const bench_numa_usage[] = {
        "perf bench numa <options>",
        NULL
};

static const char * const numa_usage[] = {
        "perf bench numa mem [<options>]",
        NULL
};

static cpu_set_t bind_to_cpu(int target_cpu)
{
        cpu_set_t orig_mask, mask;
        int ret;

        ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
        BUG_ON(ret);

        CPU_ZERO(&mask);

        if (target_cpu == -1) {
                int cpu;

                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
                        CPU_SET(cpu, &mask);
        } else {
                BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus);
                CPU_SET(target_cpu, &mask);
        }

        ret = sched_setaffinity(0, sizeof(mask), &mask);
        BUG_ON(ret);

        return orig_mask;
}

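/*
 * Note: the node -> CPU mapping below assumes that CPUs are numbered in
 * contiguous, equally-sized blocks per node, i.e. node N owns CPUs
 * [N*cpus_per_node .. (N+1)*cpus_per_node); the BUG_ON()s sanity-check
 * the arithmetic behind that assumption:
 */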
static cpu_set_t bind_to_node(int target_node)
{
        int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes;
        cpu_set_t orig_mask, mask;
        int cpu;
        int ret;

        BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus);
        BUG_ON(!cpus_per_node);

        ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
        BUG_ON(ret);

        CPU_ZERO(&mask);

        if (target_node == -1) {
                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
                        CPU_SET(cpu, &mask);
        } else {
                int cpu_start = (target_node + 0) * cpus_per_node;
                int cpu_stop  = (target_node + 1) * cpus_per_node;

                BUG_ON(cpu_stop > g->p.nr_cpus);

                for (cpu = cpu_start; cpu < cpu_stop; cpu++)
                        CPU_SET(cpu, &mask);
        }

        ret = sched_setaffinity(0, sizeof(mask), &mask);
        BUG_ON(ret);

        return orig_mask;
}

static void bind_to_cpumask(cpu_set_t mask)
{
        int ret;

        ret = sched_setaffinity(0, sizeof(mask), &mask);
        BUG_ON(ret);
}

static void mempol_restore(void)
{
        int ret;

        ret = set_mempolicy(MPOL_DEFAULT, NULL, g->p.nr_nodes-1);

        BUG_ON(ret);
}

static void bind_to_memnode(int node)
{
        unsigned long nodemask;
        int ret;

        if (node == -1)
                return;

        BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask)*8);
        nodemask = 1L << node;

        ret = set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask)*8);
        dprintf("binding to node %d, mask: %016lx => %d\n", node, nodemask, ret);

        BUG_ON(ret);
}

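/* Assumed huge page size, used as the allocation alignment unit below (2MB is the common x86 THP size): */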
#define HPSIZE (2*1024*1024)

#define set_taskname(fmt...)                            \
do {                                                    \
        char name[20];                                  \
                                                        \
        snprintf(name, 20, fmt);                        \
        prctl(PR_SET_NAME, name);                       \
} while (0)
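/* Note: PR_SET_NAME truncates the task name to TASK_COMM_LEN (16 bytes, including the terminating NUL). */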

static u8 *alloc_data(ssize_t bytes0, int map_flags,
                      int init_zero, int init_cpu0, int thp, int init_random)
{
        cpu_set_t orig_mask;
        ssize_t bytes;
        u8 *buf;
        int ret;

        if (!bytes0)
                return NULL;

        /* Allocate and initialize all memory on CPU#0: */
        if (init_cpu0) {
                orig_mask = bind_to_node(0);
                bind_to_memnode(0);
        }

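        /* Over-allocate by one huge page, so the buffer can be aligned to a 2MB boundary further below: */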
        bytes = bytes0 + HPSIZE;

        buf = (void *)mmap(0, bytes, PROT_READ|PROT_WRITE, MAP_ANON|map_flags, -1, 0);
        BUG_ON(buf == (void *)-1);

        if (map_flags == MAP_PRIVATE) {
                if (thp > 0) {
                        ret = madvise(buf, bytes, MADV_HUGEPAGE);
                        if (ret && !g->print_once) {
                                g->print_once = 1;
                                printf("WARNING: Could not enable THP - do: 'echo madvise > /sys/kernel/mm/transparent_hugepage/enabled'\n");
                        }
                }
                if (thp < 0) {
                        ret = madvise(buf, bytes, MADV_NOHUGEPAGE);
                        if (ret && !g->print_once) {
                                g->print_once = 1;
                                printf("WARNING: Could not disable THP: run a CONFIG_TRANSPARENT_HUGEPAGE kernel?\n");
                        }
                }
        }

        if (init_zero) {
                bzero(buf, bytes);
        } else {
                /* Initialize random contents, different in each word: */
                if (init_random) {
                        u64 *wbuf = (void *)buf;
                        long off = rand();
                        long i;

                        for (i = 0; i < bytes/8; i++)
                                wbuf[i] = i + off;
                }
        }

        /* Align to 2MB boundary: */
        buf = (void *)(((unsigned long)buf + HPSIZE-1) & ~(HPSIZE-1));

        /* Restore affinity: */
        if (init_cpu0) {
                bind_to_cpumask(orig_mask);
                mempol_restore();
        }

        return buf;
}

static void free_data(void *data, ssize_t bytes)
{
        int ret;

        if (!data)
                return;

        ret = munmap(data, bytes);
        BUG_ON(ret);
}

/*
 * Create a shared memory buffer that can be shared between processes, zeroed:
 */
static void * zalloc_shared_data(ssize_t bytes)
{
        return alloc_data(bytes, MAP_SHARED, 1, g->p.init_cpu0,  g->p.thp, g->p.init_random);
}

/*
 * Create a shared memory buffer that can be shared between processes:
 */
static void * setup_shared_data(ssize_t bytes)
{
        return alloc_data(bytes, MAP_SHARED, 0, g->p.init_cpu0,  g->p.thp, g->p.init_random);
}

/*
 * Allocate process-local memory - this will either be shared between
 * threads of this process, or only be accessed by this thread:
 */
static void * setup_private_data(ssize_t bytes)
{
        return alloc_data(bytes, MAP_PRIVATE, 0, g->p.init_cpu0,  g->p.thp, g->p.init_random);
}

/*
 * Initialize a process-shared (global) mutex:
 */
static void init_global_mutex(pthread_mutex_t *mutex)
{
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
        pthread_mutex_init(mutex, &attr);
}

static int parse_cpu_list(const char *arg)
{
        p0.cpu_list_str = strdup(arg);

        dprintf("got CPU list: {%s}\n", p0.cpu_list_str);

        return 0;
}

static int parse_setup_cpu_list(void)
{
        struct thread_data *td;
        char *str0, *str;
        int t;

        if (!g->p.cpu_list_str)
                return 0;

        dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);

        str0 = str = strdup(g->p.cpu_list_str);
        t = 0;

        BUG_ON(!str);

        tprintf("# binding tasks to CPUs:\n");
        tprintf("#  ");

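        /*
         * Each comma-separated token specifies a CPU (or CPU range) to bind
         * the next task(s) to: <cpu>[-<cpu2>], with optional '#'<step>,
         * '_'<len> (allow a window of <len> CPUs) and 'x'<mul> (repeat for
         * <mul> consecutive tasks) suffixes - see the parsing below:
         */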
        while (true) {
                int bind_cpu, bind_cpu_0, bind_cpu_1;
                char *tok, *tok_end, *tok_step, *tok_len, *tok_mul;
                int bind_len;
                int step;
                int mul;

                tok = strsep(&str, ",");
                if (!tok)
                        break;

                tok_end = strstr(tok, "-");

                dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
                if (!tok_end) {
                        /* Single CPU specified: */
                        bind_cpu_0 = bind_cpu_1 = atol(tok);
                } else {
                        /* CPU range specified (for example: "5-11"): */
                        bind_cpu_0 = atol(tok);
                        bind_cpu_1 = atol(tok_end + 1);
                }

                step = 1;
                tok_step = strstr(tok, "#");
                if (tok_step) {
                        step = atol(tok_step + 1);
                        BUG_ON(step <= 0 || step >= g->p.nr_cpus);
                }

                /*
                 * Mask length.
                 * Eg: "--cpus 8_4-16#4" means: '--cpus 8_4,12_4,16_4',
                 * where the _4 means the next 4 CPUs are allowed.
                 */
                bind_len = 1;
                tok_len = strstr(tok, "_");
                if (tok_len) {
                        bind_len = atol(tok_len + 1);
                        BUG_ON(bind_len <= 0 || bind_len > g->p.nr_cpus);
                }

                /* Multiplier shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */
                mul = 1;
                tok_mul = strstr(tok, "x");
                if (tok_mul) {
                        mul = atol(tok_mul + 1);
                        BUG_ON(mul <= 0);
                }

                dprintf("CPUs: %d_%d-%d#%dx%d\n", bind_cpu_0, bind_len, bind_cpu_1, step, mul);

                if (bind_cpu_0 >= g->p.nr_cpus || bind_cpu_1 >= g->p.nr_cpus) {
                        printf("\nTest not applicable, system has only %d CPUs.\n", g->p.nr_cpus);
                        return -1;
                }

                BUG_ON(bind_cpu_0 < 0 || bind_cpu_1 < 0);
                BUG_ON(bind_cpu_0 > bind_cpu_1);

                for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
                        int i;

                        for (i = 0; i < mul; i++) {
                                int cpu;

                                if (t >= g->p.nr_tasks) {
                                        printf("\n# NOTE: ignoring bind CPUs starting at CPU#%d\n #", bind_cpu);
                                        goto out;
                                }
                                td = g->threads + t;

                                if (t)
                                        tprintf(",");
                                if (bind_len > 1) {
                                        tprintf("%2d/%d", bind_cpu, bind_len);
                                } else {
                                        tprintf("%2d", bind_cpu);
                                }

                                CPU_ZERO(&td->bind_cpumask);
                                for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
                                        BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus);
                                        CPU_SET(cpu, &td->bind_cpumask);
                                }
                                t++;
                        }
                }
        }
out:

        tprintf("\n");

        if (t < g->p.nr_tasks)
                printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t);

        free(str0);
        return 0;
}

static int parse_cpus_opt(const struct option *opt __maybe_unused,
                          const char *arg, int unset __maybe_unused)
{
        if (!arg)
                return -1;

        return parse_cpu_list(arg);
}

static int parse_node_list(const char *arg)
{
        p0.node_list_str = strdup(arg);

        dprintf("got NODE list: {%s}\n", p0.node_list_str);

        return 0;
}

static int parse_setup_node_list(void)
{
        struct thread_data *td;
        char *str0, *str;
        int t;

        if (!g->p.node_list_str)
                return 0;

        dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);

        str0 = str = strdup(g->p.node_list_str);
        t = 0;

        BUG_ON(!str);

        tprintf("# binding tasks to NODEs:\n");
        tprintf("# ");

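        /*
         * Node list tokens follow the same syntax as the CPU list, minus
         * the '_'<len> mask-length suffix: <node>[-<node2>], with optional
         * '#'<step> and 'x'<mul> suffixes:
         */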
        while (true) {
                int bind_node, bind_node_0, bind_node_1;
                char *tok, *tok_end, *tok_step, *tok_mul;
                int step;
                int mul;

                tok = strsep(&str, ",");
                if (!tok)
                        break;

                tok_end = strstr(tok, "-");

                dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
                if (!tok_end) {
                        /* Single NODE specified: */
                        bind_node_0 = bind_node_1 = atol(tok);
                } else {
                        /* NODE range specified (for example: "5-11"): */
                        bind_node_0 = atol(tok);
                        bind_node_1 = atol(tok_end + 1);
                }

                step = 1;
                tok_step = strstr(tok, "#");
                if (tok_step) {
                        step = atol(tok_step + 1);
                        BUG_ON(step <= 0 || step >= g->p.nr_nodes);
                }

                /* Multiplier shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */
                mul = 1;
                tok_mul = strstr(tok, "x");
                if (tok_mul) {
                        mul = atol(tok_mul + 1);
                        BUG_ON(mul <= 0);
                }

                dprintf("NODEs: %d-%d #%d\n", bind_node_0, bind_node_1, step);

                if (bind_node_0 >= g->p.nr_nodes || bind_node_1 >= g->p.nr_nodes) {
                        printf("\nTest not applicable, system has only %d nodes.\n", g->p.nr_nodes);
                        return -1;
                }

                BUG_ON(bind_node_0 < 0 || bind_node_1 < 0);
                BUG_ON(bind_node_0 > bind_node_1);

                for (bind_node = bind_node_0; bind_node <= bind_node_1; bind_node += step) {
                        int i;

                        for (i = 0; i < mul; i++) {
                                if (t >= g->p.nr_tasks) {
                                        printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
                                        goto out;
                                }
                                td = g->threads + t;

                                if (!t)
                                        tprintf(" %2d", bind_node);
                                else
                                        tprintf(",%2d", bind_node);

                                td->bind_node = bind_node;
                                t++;
                        }
                }
        }
out:

        tprintf("\n");

        if (t < g->p.nr_tasks)
                printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t);

        free(str0);
        return 0;
}

static int parse_nodes_opt(const struct option *opt __maybe_unused,
                          const char *arg, int unset __maybe_unused)
{
        if (!arg)
                return -1;

        return parse_node_list(arg);
}

#define BIT(x) (1ul << x)

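/*
 * One step of a 32-bit Galois LFSR: shift right by one bit and XOR in
 * the tap mask (bits 1, 5, 6 and 31) iff the bit shifted out was set.
 * This gives the -r/--data_rand_walk mode a cheap pseudo-random index
 * sequence:
 */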
static inline uint32_t lfsr_32(uint32_t lfsr)
{
        const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);
        return (lfsr>>1) ^ ((0x0u - (lfsr & 0x1u)) & taps);
}

/*
 * Make sure there's real data dependency to RAM (when read
 * accesses are enabled), so the compiler, the CPU and the
 * kernel (KSM, zero page, etc.) cannot optimize away RAM
 * accesses:
 */
static inline u64 access_data(u64 *data __attribute__((unused)), u64 val)
{
        if (g->p.data_reads)
                val += *data;
        if (g->p.data_writes)
                *data = val + 1;
        return val;
}

/*
 * The worker process does two types of work, a forwards going
 * loop and a backwards going loop.
 *
 * We do this so that on multiprocessor systems we do not create
 * a 'train' of processing, with highly synchronized processes,
 * skewing the whole benchmark.
 */
static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val)
{
        long words = bytes/sizeof(u64);
        u64 *data = (void *)__data;
        long chunk_0, chunk_1;
        u64 *d0, *d, *d1;
        long off;
        long i;

        BUG_ON(!data && words);
        BUG_ON(data && !words);

        if (!data)
                return val;

        /* Very simple memset() work variant: */
        if (g->p.data_zero_memset && !g->p.data_rand_walk) {
                bzero(data, bytes);
                return val;
        }

        /* Spread out by PID/TID nr and by loop nr: */
        chunk_0 = words/nr_max;
        chunk_1 = words/g->p.nr_loops;
        off = nr*chunk_0 + loop*chunk_1;

        while (off >= words)
                off -= words;

        if (g->p.data_rand_walk) {
                u32 lfsr = nr + loop + val;
                int j;

                for (i = 0; i < words/1024; i++) {
                        long start, end;

                        lfsr = lfsr_32(lfsr);

                        start = lfsr % words;
                        end = min(start + 1024, words-1);

                        if (g->p.data_zero_memset) {
                                bzero(data + start, (end-start) * sizeof(u64));
                        } else {
                                for (j = start; j < end; j++)
                                        val = access_data(data + j, val);
                        }
                }
        } else if (!g->p.data_backwards || (nr + loop) & 1) {

                d0 = data + off;
                d  = data + off + 1;
                d1 = data + words;

                /* Process data forwards: */
                for (;;) {
                        if (unlikely(d >= d1))
                                d = data;
                        if (unlikely(d == d0))
                                break;

                        val = access_data(d, val);

                        d++;
                }
        } else {
                /* Process data backwards: */

                d0 = data + off;
                d  = data + off - 1;
                d1 = data + words;

                for (;;) {
                        if (unlikely(d < data))
                                d = data + words-1;
                        if (unlikely(d == d0))
                                break;

                        val = access_data(d, val);

                        d--;
                }
        }

        return val;
}

static void update_curr_cpu(int task_nr, unsigned long bytes_worked)
{
        unsigned int cpu;

        cpu = sched_getcpu();

        g->threads[task_nr].curr_cpu = cpu;
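        /*
         * prctl() with option 0 is invalid, so this is effectively a no-op
         * syscall - it merely makes the amount of work done visible as an
         * argument in syscall traces:
         */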
        prctl(0, bytes_worked);
}

#define MAX_NR_NODES    64

/*
 * Count the number of nodes a process's threads
 * are spread out on.
 *
 * A count of 1 means that the process is compressed
 * to a single node. A count of g->p.nr_nodes means it's
 * spread out on the whole system.
 */
static int count_process_nodes(int process_nr)
{
        char node_present[MAX_NR_NODES] = { 0, };
        int nodes;
        int n, t;

        for (t = 0; t < g->p.nr_threads; t++) {
                struct thread_data *td;
                int task_nr;
                int node;

                task_nr = process_nr*g->p.nr_threads + t;
                td = g->threads + task_nr;

                node = numa_node_of_cpu(td->curr_cpu);
                if (node < 0) /* curr_cpu was likely still -1 */
                        return 0;

                node_present[node] = 1;
        }

        nodes = 0;

        for (n = 0; n < MAX_NR_NODES; n++)
                nodes += node_present[n];

        return nodes;
}

/*
 * Count the number of distinct processes that have at least
 * one thread running on a given node.
 *
 * A count of 1 means that the node contains only a single
 * process. If all nodes on the system contain at most one
 * process then we are well-converged.
 */
static int count_node_processes(int node)
{
        int processes = 0;
        int t, p;

        for (p = 0; p < g->p.nr_proc; p++) {
                for (t = 0; t < g->p.nr_threads; t++) {
                        struct thread_data *td;
                        int task_nr;
                        int n;

                        task_nr = p*g->p.nr_threads + t;
                        td = g->threads + task_nr;

                        n = numa_node_of_cpu(td->curr_cpu);
                        if (n == node) {
                                processes++;
                                break;
                        }
                }
        }

        return processes;
}

static void calc_convergence_compression(int *strong)
{
        unsigned int nodes_min, nodes_max;
        int p;

        nodes_min = -1;
        nodes_max =  0;

        for (p = 0; p < g->p.nr_proc; p++) {
                unsigned int nodes = count_process_nodes(p);

                if (!nodes) {
                        *strong = 0;
                        return;
                }

                nodes_min = min(nodes, nodes_min);
                nodes_max = max(nodes, nodes_max);
        }

        /* Strong convergence: all threads compress on a single node: */
        if (nodes_min == 1 && nodes_max == 1) {
                *strong = 1;
        } else {
                *strong = 0;
                tprintf(" {%d-%d}", nodes_min, nodes_max);
        }
}

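/*
 * Take a snapshot of the current task placement, print a one-line
 * summary, and check for convergence: the workload counts as
 * NUMA-converged once every process's threads have compressed onto
 * a single node:
 */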
static void calc_convergence(double runtime_ns_max, double *convergence)
{
        unsigned int loops_done_min, loops_done_max;
        int process_groups;
        int nodes[MAX_NR_NODES];
        int distance;
        int nr_min;
        int nr_max;
        int strong;
        int sum;
        int nr;
        int node;
        int cpu;
        int t;

        if (!g->p.show_convergence && !g->p.measure_convergence)
                return;

        for (node = 0; node < g->p.nr_nodes; node++)
                nodes[node] = 0;

        loops_done_min = -1;
        loops_done_max = 0;

        for (t = 0; t < g->p.nr_tasks; t++) {
                struct thread_data *td = g->threads + t;
                unsigned int loops_done;

                cpu = td->curr_cpu;

                /* Not all threads have written it yet: */
                if (cpu < 0)
                        continue;

                node = numa_node_of_cpu(cpu);

                nodes[node]++;

                loops_done = td->loops_done;
                loops_done_min = min(loops_done, loops_done_min);
                loops_done_max = max(loops_done, loops_done_max);
        }

        nr_max = 0;
        nr_min = g->p.nr_tasks;
        sum = 0;

        for (node = 0; node < g->p.nr_nodes; node++) {
                nr = nodes[node];
                nr_min = min(nr, nr_min);
                nr_max = max(nr, nr_max);
                sum += nr;
        }
        BUG_ON(nr_min > nr_max);

        BUG_ON(sum > g->p.nr_tasks);

        if (0 && (sum < g->p.nr_tasks))
                return;

        /*
         * Count the number of distinct process groups present
         * on nodes - when we are converged this will decrease
         * to g->p.nr_proc:
         */
        process_groups = 0;

        for (node = 0; node < g->p.nr_nodes; node++) {
                int processes = count_node_processes(node);

                nr = nodes[node];
                tprintf(" %2d/%-2d", nr, processes);

                process_groups += processes;
        }

        distance = nr_max - nr_min;

        tprintf(" [%2d/%-2d]", distance, process_groups);

        tprintf(" l:%3d-%-3d (%3d)",
                loops_done_min, loops_done_max, loops_done_max-loops_done_min);

        if (loops_done_min && loops_done_max) {
                double skew = 1.0 - (double)loops_done_min/loops_done_max;

                tprintf(" [%4.1f%%]", skew * 100.0);
        }

        calc_convergence_compression(&strong);

        if (strong && process_groups == g->p.nr_proc) {
                if (!*convergence) {
                        *convergence = runtime_ns_max;
                        tprintf(" (%6.1fs converged)\n", *convergence/1e9);
                        if (g->p.measure_convergence) {
                                g->all_converged = true;
                                g->stop_work = true;
                        }
                }
        } else {
                if (*convergence) {
                        tprintf(" (%6.1fs de-converged)", runtime_ns_max/1e9);
                        *convergence = 0;
                }
                tprintf("\n");
        }
}

static void show_summary(double runtime_ns_max, int l, double *convergence)
{
        tprintf("\r #  %5.1f%%  [%.1f mins]",
                (double)(l+1)/g->p.nr_loops*100.0, runtime_ns_max/1e9 / 60.0);

        calc_convergence(runtime_ns_max, convergence);

        if (g->p.show_details >= 0)
                fflush(stdout);
}

static void *worker_thread(void *__tdata)
{
        struct thread_data *td = __tdata;
        struct timeval start0, start, stop, diff;
        int process_nr = td->process_nr;
        int thread_nr = td->thread_nr;
        unsigned long last_perturbance;
        int task_nr = td->task_nr;
        int details = g->p.show_details;
        int first_task, last_task;
        double convergence = 0;
        u64 val = td->val;
        double runtime_ns_max;
        u8 *global_data;
        u8 *process_data;
        u8 *thread_data;
        u64 bytes_done;
        long work_done;
        u32 l;
        struct rusage rusage;

        bind_to_cpumask(td->bind_cpumask);
        bind_to_memnode(td->bind_node);

        set_taskname("thread %d/%d", process_nr, thread_nr);

        global_data = g->data;
        process_data = td->process_data;
        thread_data = setup_private_data(g->p.bytes_thread);

        bytes_done = 0;

        last_task = 0;
        if (process_nr == g->p.nr_proc-1 && thread_nr == g->p.nr_threads-1)
                last_task = 1;

        first_task = 0;
        if (process_nr == 0 && thread_nr == 0)
                first_task = 1;

        if (details >= 2) {
                printf("#  thread %2d / %2d global mem: %p, process mem: %p, thread mem: %p\n",
                        process_nr, thread_nr, global_data, process_data, thread_data);
        }

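        /*
         * Startup handshake: the main process holds start_work_mutex while
         * forking, so every worker registers in nr_tasks_started and then
         * blocks on start_work_mutex until the main process releases it -
         * which starts all tasks at once. The last worker through unlocks
         * startup_done_mutex to wake up the main process:
         */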
        if (g->p.serialize_startup) {
                pthread_mutex_lock(&g->startup_mutex);
                g->nr_tasks_started++;
                pthread_mutex_unlock(&g->startup_mutex);

                /* Here we will wait for the main process to start us all at once: */
                pthread_mutex_lock(&g->start_work_mutex);
                g->nr_tasks_working++;

                /* The last one wakes the main process: */
                if (g->nr_tasks_working == g->p.nr_tasks)
                        pthread_mutex_unlock(&g->startup_done_mutex);

                pthread_mutex_unlock(&g->start_work_mutex);
        }

        gettimeofday(&start0, NULL);

        start = stop = start0;
        last_perturbance = start.tv_sec;

        for (l = 0; l < g->p.nr_loops; l++) {
                start = stop;

                if (g->stop_work)
                        break;

                val += do_work(global_data,  g->p.bytes_global,  process_nr, g->p.nr_proc,      l, val);
                val += do_work(process_data, g->p.bytes_process, thread_nr,  g->p.nr_threads,   l, val);
                val += do_work(thread_data,  g->p.bytes_thread,  0,          1,         l, val);

                if (g->p.sleep_usecs) {
                        pthread_mutex_lock(td->process_lock);
                        usleep(g->p.sleep_usecs);
                        pthread_mutex_unlock(td->process_lock);
                }
                /*
                 * Amount of work to be done under a process-global lock:
                 */
                if (g->p.bytes_process_locked) {
                        pthread_mutex_lock(td->process_lock);
                        val += do_work(process_data, g->p.bytes_process_locked, thread_nr,  g->p.nr_threads,    l, val);
                        pthread_mutex_unlock(td->process_lock);
                }

                work_done = g->p.bytes_global + g->p.bytes_process +
                            g->p.bytes_process_locked + g->p.bytes_thread;

                update_curr_cpu(task_nr, work_done);
                bytes_done += work_done;

                if (details < 0 && !g->p.perturb_secs && !g->p.measure_convergence && !g->p.nr_secs)
                        continue;

                td->loops_done = l;

                gettimeofday(&stop, NULL);

                /* Check whether our max runtime timed out: */
                if (g->p.nr_secs) {
                        timersub(&stop, &start0, &diff);
                        if ((u32)diff.tv_sec >= g->p.nr_secs) {
                                g->stop_work = true;
                                break;
                        }
                }

                /* Update the summary at most once per second: */
                if (start.tv_sec == stop.tv_sec)
                        continue;

                /*
                 * Perturb the first task's equilibrium every g->p.perturb_secs seconds,
                 * by migrating to CPU#0:
                 */
                if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
                        cpu_set_t orig_mask;
                        int target_cpu;
                        int this_cpu;

                        last_perturbance = stop.tv_sec;

                        /*
                         * Depending on where we are running, move into
                         * the other half of the system, to create some
                         * real disturbance:
                         */
                        this_cpu = g->threads[task_nr].curr_cpu;
                        if (this_cpu < g->p.nr_cpus/2)
                                target_cpu = g->p.nr_cpus-1;
                        else
                                target_cpu = 0;

                        orig_mask = bind_to_cpu(target_cpu);

                        /* Here we are running on the target CPU already */
                        if (details >= 1)
                                printf(" (injecting perturbance, moved to CPU#%d)\n", target_cpu);

                        bind_to_cpumask(orig_mask);
                }

                if (details >= 3) {
                        timersub(&stop, &start, &diff);
                        runtime_ns_max = diff.tv_sec * 1000000000;
                        runtime_ns_max += diff.tv_usec * 1000;

                        if (details >= 0) {
                                printf(" #%2d / %2d: %14.2lf nsecs/op [val: %016"PRIx64"]\n",
                                        process_nr, thread_nr, runtime_ns_max / bytes_done, val);
                        }
                        fflush(stdout);
                }
                if (!last_task)
                        continue;

                timersub(&stop, &start0, &diff);
                runtime_ns_max = diff.tv_sec * 1000000000ULL;
                runtime_ns_max += diff.tv_usec * 1000ULL;

                show_summary(runtime_ns_max, l, &convergence);
        }

        gettimeofday(&stop, NULL);
        timersub(&stop, &start0, &diff);
        td->runtime_ns = diff.tv_sec * 1000000000ULL;
        td->runtime_ns += diff.tv_usec * 1000ULL;
        td->speed_gbs = bytes_done / (td->runtime_ns / 1e9) / 1e9;

        getrusage(RUSAGE_THREAD, &rusage);
        td->system_time_ns = rusage.ru_stime.tv_sec * 1000000000ULL;
        td->system_time_ns += rusage.ru_stime.tv_usec * 1000ULL;
        td->user_time_ns = rusage.ru_utime.tv_sec * 1000000000ULL;
        td->user_time_ns += rusage.ru_utime.tv_usec * 1000ULL;

        free_data(thread_data, g->p.bytes_thread);

        pthread_mutex_lock(&g->stop_work_mutex);
        g->bytes_done += bytes_done;
        pthread_mutex_unlock(&g->stop_work_mutex);

        return NULL;
}

/*
 * A worker process starts a couple of threads:
 */
static void worker_process(int process_nr)
{
        pthread_mutex_t process_lock;
        struct thread_data *td;
        pthread_t *pthreads;
        u8 *process_data;
        int task_nr;
        int ret;
        int t;

        pthread_mutex_init(&process_lock, NULL);
        set_taskname("process %d", process_nr);

        /*
         * Pick up the memory policy and the CPU binding of our first thread,
         * so that we initialize memory accordingly:
         */
        task_nr = process_nr*g->p.nr_threads;
        td = g->threads + task_nr;

        bind_to_memnode(td->bind_node);
        bind_to_cpumask(td->bind_cpumask);

        pthreads = zalloc(g->p.nr_threads * sizeof(pthread_t));
        process_data = setup_private_data(g->p.bytes_process);

        if (g->p.show_details >= 3) {
                printf(" # process %2d global mem: %p, process mem: %p\n",
                        process_nr, g->data, process_data);
        }

        for (t = 0; t < g->p.nr_threads; t++) {
                task_nr = process_nr*g->p.nr_threads + t;
                td = g->threads + task_nr;

                td->process_data = process_data;
                td->process_nr   = process_nr;
                td->thread_nr    = t;
                td->task_nr      = task_nr;
                td->val          = rand();
                td->curr_cpu     = -1;
                td->process_lock = &process_lock;

                ret = pthread_create(pthreads + t, NULL, worker_thread, td);
                BUG_ON(ret);
        }

        for (t = 0; t < g->p.nr_threads; t++) {
                ret = pthread_join(pthreads[t], NULL);
                BUG_ON(ret);
        }

        free_data(process_data, g->p.bytes_process);
        free(pthreads);
}

static void print_summary(void)
{
        if (g->p.show_details < 0)
                return;

        printf("\n ###\n");
        printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
                g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus);
        printf(" #      %5dx %5ldMB global  shared mem operations\n",
                        g->p.nr_loops, g->p.bytes_global/1024/1024);
        printf(" #      %5dx %5ldMB process shared mem operations\n",
                        g->p.nr_loops, g->p.bytes_process/1024/1024);
        printf(" #      %5dx %5ldMB thread  local  mem operations\n",
                        g->p.nr_loops, g->p.bytes_thread/1024/1024);

        printf(" ###\n");

        printf("\n ###\n"); fflush(stdout);
}

static void init_thread_data(void)
{
        ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
        int t;

        g->threads = zalloc_shared_data(size);

        for (t = 0; t < g->p.nr_tasks; t++) {
                struct thread_data *td = g->threads + t;
                int cpu;

                /* Allow all nodes by default: */
                td->bind_node = -1;

                /* Allow all CPUs by default: */
                CPU_ZERO(&td->bind_cpumask);
                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
                        CPU_SET(cpu, &td->bind_cpumask);
        }
}

static void deinit_thread_data(void)
{
        ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;

        free_data(g->threads, size);
}

static int init(void)
{
        g = (void *)alloc_data(sizeof(*g), MAP_SHARED, 1, 0, 0 /* THP */, 0);

        /* Copy over options: */
        g->p = p0;

        g->p.nr_cpus = numa_num_configured_cpus();

        g->p.nr_nodes = numa_max_node() + 1;

        /* char array in count_process_nodes(): */
        BUG_ON(g->p.nr_nodes > MAX_NR_NODES || g->p.nr_nodes < 0);

        if (g->p.show_quiet && !g->p.show_details)
                g->p.show_details = -1;

        /* Some memory should be specified: */
        if (!g->p.mb_global_str && !g->p.mb_proc_str && !g->p.mb_thread_str)
                return -1;

        if (g->p.mb_global_str) {
                g->p.mb_global = atof(g->p.mb_global_str);
                BUG_ON(g->p.mb_global < 0);
        }

        if (g->p.mb_proc_str) {
                g->p.mb_proc = atof(g->p.mb_proc_str);
                BUG_ON(g->p.mb_proc < 0);
        }

        if (g->p.mb_proc_locked_str) {
                g->p.mb_proc_locked = atof(g->p.mb_proc_locked_str);
                BUG_ON(g->p.mb_proc_locked < 0);
                BUG_ON(g->p.mb_proc_locked > g->p.mb_proc);
        }

        if (g->p.mb_thread_str) {
                g->p.mb_thread = atof(g->p.mb_thread_str);
                BUG_ON(g->p.mb_thread < 0);
        }

        BUG_ON(g->p.nr_threads <= 0);
        BUG_ON(g->p.nr_proc <= 0);

        g->p.nr_tasks = g->p.nr_proc*g->p.nr_threads;

        g->p.bytes_global               = g->p.mb_global        *1024L*1024L;
        g->p.bytes_process              = g->p.mb_proc          *1024L*1024L;
        g->p.bytes_process_locked       = g->p.mb_proc_locked   *1024L*1024L;
        g->p.bytes_thread               = g->p.mb_thread        *1024L*1024L;

        g->data = setup_shared_data(g->p.bytes_global);

        /* Startup serialization: */
        init_global_mutex(&g->start_work_mutex);
        init_global_mutex(&g->startup_mutex);
        init_global_mutex(&g->startup_done_mutex);
        init_global_mutex(&g->stop_work_mutex);

        init_thread_data();

        tprintf("#\n");
        if (parse_setup_cpu_list() || parse_setup_node_list())
                return -1;
        tprintf("#\n");

        print_summary();

        return 0;
}

static void deinit(void)
{
        free_data(g->data, g->p.bytes_global);
        g->data = NULL;

        deinit_thread_data();

        free_data(g, sizeof(*g));
        g = NULL;
}

/*
 * Print a short or long result, depending on the verbosity setting:
 */
static void print_res(const char *name, double val,
                      const char *txt_unit, const char *txt_short, const char *txt_long)
{
        if (!name)
                name = "main,";

        if (!g->p.show_quiet)
                printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short);
        else
                printf(" %14.3f %s\n", val, txt_long);
}

static int __bench_numa(const char *name)
{
        struct timeval start, stop, diff;
        u64 runtime_ns_min, runtime_ns_sum;
        pid_t *pids, pid, wpid;
        double delta_runtime;
        double runtime_avg;
        double runtime_sec_max;
        double runtime_sec_min;
        int wait_stat;
        double bytes;
        int i, t, p;

        if (init())
                return -1;

        pids = zalloc(g->p.nr_proc * sizeof(*pids));
        pid = -1;

        /* All threads try to acquire it, this way we can wait for them to start up: */
        pthread_mutex_lock(&g->start_work_mutex);

        if (g->p.serialize_startup) {
                tprintf(" #\n");
                tprintf(" # Startup synchronization: ..."); fflush(stdout);
        }

        gettimeofday(&start, NULL);

        for (i = 0; i < g->p.nr_proc; i++) {
                pid = fork();
                dprintf(" # process %2d: PID %d\n", i, pid);

                BUG_ON(pid < 0);
                if (!pid) {
                        /* Child process: */
                        worker_process(i);

                        exit(0);
                }
                pids[i] = pid;

        }
        /* Wait for all the threads to start up: */
        while (g->nr_tasks_started != g->p.nr_tasks)
                usleep(1000);

        BUG_ON(g->nr_tasks_started != g->p.nr_tasks);

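        /*
         * startup_done_mutex starts out unlocked, so the first lock below
         * succeeds immediately; the second lock blocks until the last
         * worker thread unlocks the mutex (see worker_thread()), i.e.
         * until all tasks are up and running:
         */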
1476         if (g->p.serialize_startup) {
1477                 double startup_sec;
1478
1479                 pthread_mutex_lock(&g->startup_done_mutex);
1480
1481                 /* This will start all threads: */
1482                 pthread_mutex_unlock(&g->start_work_mutex);
1483
1484                 /* This mutex is locked - the last started thread will wake us: */
1485                 pthread_mutex_lock(&g->startup_done_mutex);
1486
1487                 gettimeofday(&stop, NULL);
1488
1489                 timersub(&stop, &start, &diff);
1490
1491                 startup_sec = diff.tv_sec * 1000000000.0;
1492                 startup_sec += diff.tv_usec * 1000.0;
1493                 startup_sec /= 1e9;
1494
1495                 tprintf(" threads initialized in %.6f seconds.\n", startup_sec);
1496                 tprintf(" #\n");
1497
1498                 start = stop;
1499                 pthread_mutex_unlock(&g->startup_done_mutex);
1500         } else {
1501                 gettimeofday(&start, NULL);
1502         }
1503
1504         /* Parent process: */
1505
1506
1507         for (i = 0; i < g->p.nr_proc; i++) {
1508                 wpid = waitpid(pids[i], &wait_stat, 0);
1509                 BUG_ON(wpid < 0);
1510                 BUG_ON(!WIFEXITED(wait_stat));
1511
1512         }
1513
1514         runtime_ns_sum = 0;
1515         runtime_ns_min = -1LL;
1516
1517         for (t = 0; t < g->p.nr_tasks; t++) {
1518                 u64 thread_runtime_ns = g->threads[t].runtime_ns;
1519
1520                 runtime_ns_sum += thread_runtime_ns;
1521                 runtime_ns_min = min(thread_runtime_ns, runtime_ns_min);
1522         }
1523
1524         gettimeofday(&stop, NULL);
1525         timersub(&stop, &start, &diff);
1526
1527         BUG_ON(bench_format != BENCH_FORMAT_DEFAULT);
1528
1529         tprintf("\n ###\n");
1530         tprintf("\n");
1531
1532         runtime_sec_max = diff.tv_sec * 1000000000.0;
1533         runtime_sec_max += diff.tv_usec * 1000.0;
1534         runtime_sec_max /= 1e9;
1535
1536         runtime_sec_min = runtime_ns_min/1e9;
1537
1538         bytes = g->bytes_done;
1539         runtime_avg = (double)runtime_ns_sum / g->p.nr_tasks / 1e9;
1540
1541         if (g->p.measure_convergence) {
1542                 print_res(name, runtime_sec_max,
1543                         "secs,", "NUMA-convergence-latency", "secs latency to NUMA-converge");
1544         }
1545
1546         print_res(name, runtime_sec_max,
1547                 "secs,", "runtime-max/thread",  "secs slowest (max) thread-runtime");
1548
1549         print_res(name, runtime_sec_min,
1550                 "secs,", "runtime-min/thread",  "secs fastest (min) thread-runtime");
1551
1552         print_res(name, runtime_avg,
1553                 "secs,", "runtime-avg/thread",  "secs average thread-runtime");
1554
1555         delta_runtime = (runtime_sec_max - runtime_sec_min)/2.0;
1556         print_res(name, delta_runtime / runtime_sec_max * 100.0,
1557                 "%,", "spread-runtime/thread",  "% difference between max/avg runtime");
1558
1559         print_res(name, bytes / g->p.nr_tasks / 1e9,
1560                 "GB,", "data/thread",           "GB data processed, per thread");
1561
1562         print_res(name, bytes / 1e9,
1563                 "GB,", "data-total",            "GB data processed, total");
1564
1565         print_res(name, runtime_sec_max * 1e9 / (bytes / g->p.nr_tasks),
1566                 "nsecs,", "runtime/byte/thread","nsecs/byte/thread runtime");
1567
1568         print_res(name, bytes / g->p.nr_tasks / 1e9 / runtime_sec_max,
1569                 "GB/sec,", "thread-speed",      "GB/sec/thread speed");
1570
1571         print_res(name, bytes / runtime_sec_max / 1e9,
1572                 "GB/sec,", "total-speed",       "GB/sec total speed");
1573
        if (g->p.show_details >= 2) {
                char tname[32];
                struct thread_data *td;

                for (p = 0; p < g->p.nr_proc; p++) {
                        for (t = 0; t < g->p.nr_threads; t++) {
                                td = g->threads + p*g->p.nr_threads + t;
                                /* snprintf() NUL-terminates, so no memset() is needed: */
                                snprintf(tname, sizeof(tname), "process%d:thread%d", p, t);
                                print_res(tname, td->speed_gbs,
                                        "GB/sec",       "thread-speed", "GB/sec/thread speed");
                                print_res(tname, td->system_time_ns / 1e9,
                                        "secs", "thread-system-time", "system CPU time/thread");
                                print_res(tname, td->user_time_ns / 1e9,
                                        "secs", "thread-user-time", "user CPU time/thread");
                        }
                }
        }

        free(pids);

        deinit();

        return 0;
}

#define MAX_ARGS 50

/* Count the entries of a NULL-terminated argument vector: */
static int command_size(const char **argv)
{
        int size = 0;

        while (*argv) {
                size++;
                argv++;
        }

        BUG_ON(size >= MAX_ARGS);

        return size;
}
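
/*
 * For illustration (hypothetical vector, not from the test table):
 *
 *   const char *argv[] = { "mem", "-p", "1", NULL };
 *   command_size(argv);  // returns 3
 */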

static void init_params(struct params *p, const char *name, int argc, const char **argv)
{
        int i;

        printf("\n # Running %s \"perf bench numa", name);

        for (i = 0; i < argc; i++)
                printf(" %s", argv[i]);

        printf("\"\n");

        memset(p, 0, sizeof(*p));

        /* Initialize nonzero defaults: */

        p->serialize_startup            = true;
        p->data_reads                   = true;
        p->data_writes                  = true;
        p->data_backwards               = true;
        p->data_rand_walk               = true;
        p->nr_loops                     = -1; /* == UINT32_MAX for this u32, i.e. effectively unlimited */
        p->init_random                  = true;
        p->mb_global_str                = "1";
        p->nr_proc                      = 1;
        p->nr_threads                   = 1;
        p->nr_secs                      = 5;
        p->run_all                      = argc == 1;
}
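
/*
 * For instance, when bench_all() runs the "RAM-bw-local," entry below,
 * the banner printed by init_params() looks like:
 *
 *  # Running RAM-bw-local, "perf bench numa mem -p 1 -t 1 -P 1024 -C 0 -M 0 -s 20 -zZq --thp  1 --no-data_rand_walk"
 *
 * (the double space before '1' is the literal " 1" argument in OPT_BW_RAM).
 */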

static int run_bench_numa(const char *name, const char **argv)
{
        int argc = command_size(argv);

        init_params(&p0, name, argc, argv);
        argc = parse_options(argc, argv, options, bench_numa_usage, 0);
        if (argc)
                goto err;

        if (__bench_numa(name))
                goto err;

        return 0;

err:
        return -1;
}

/* The *_NOTHP variants append a second --thp option, which overrides the first: */
#define OPT_BW_RAM              "-s",  "20", "-zZq",    "--thp", " 1", "--no-data_rand_walk"
#define OPT_BW_RAM_NOTHP        OPT_BW_RAM,             "--thp", "-1"

#define OPT_CONV                "-s", "100", "-zZ0qcm", "--thp", " 1"
#define OPT_CONV_NOTHP          OPT_CONV,               "--thp", "-1"

#define OPT_BW                  "-s",  "20", "-zZ0q",   "--thp", " 1"
#define OPT_BW_NOTHP            OPT_BW,                 "--thp", "-1"
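
/*
 * For example, OPT_CONV_NOTHP expands to:
 *
 *   "-s", "100", "-zZ0qcm", "--thp", " 1", "--thp", "-1"
 *
 * Options are parsed left to right, so the trailing "--thp -1" is what
 * takes effect for the -NOTHP tests.
 */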

/*
 * The built-in test-suite executed by "perf bench numa -a".
 *
 * (A minimum of 4 nodes and 16 GB of RAM is recommended.)
 *
 * (See the expanded command-line example after the table.)
 */
static const char *tests[][MAX_ARGS] = {
   /* Basic single-stream NUMA bandwidth measurements: */
   { "RAM-bw-local,",     "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
                          "-C",    "0", "-M",   "0", OPT_BW_RAM },
   { "RAM-bw-local-NOTHP,",
                          "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
                          "-C",    "0", "-M",   "0", OPT_BW_RAM_NOTHP },
   { "RAM-bw-remote,",    "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
                          "-C",    "0", "-M",   "1", OPT_BW_RAM },

   /* 2-stream NUMA bandwidth measurements: */
   { "RAM-bw-local-2x,",  "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
                           "-C", "0,2", "-M", "0x2", OPT_BW_RAM },
   { "RAM-bw-remote-2x,", "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
                           "-C", "0,2", "-M", "1x2", OPT_BW_RAM },

   /* Cross-stream NUMA bandwidth measurement: */
   { "RAM-bw-cross,",     "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
                           "-C", "0,8", "-M", "1,0", OPT_BW_RAM },

   /* Convergence latency measurements: */
   { " 1x3-convergence,", "mem",  "-p",  "1", "-t",  "3", "-P",  "512", OPT_CONV },
   { " 1x4-convergence,", "mem",  "-p",  "1", "-t",  "4", "-P",  "512", OPT_CONV },
   { " 1x6-convergence,", "mem",  "-p",  "1", "-t",  "6", "-P", "1020", OPT_CONV },
   { " 2x3-convergence,", "mem",  "-p",  "2", "-t",  "3", "-P", "1020", OPT_CONV },
   { " 3x3-convergence,", "mem",  "-p",  "3", "-t",  "3", "-P", "1020", OPT_CONV },
   { " 4x4-convergence,", "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_CONV },
   { " 4x4-convergence-NOTHP,",
                          "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_CONV_NOTHP },
   { " 4x6-convergence,", "mem",  "-p",  "4", "-t",  "6", "-P", "1020", OPT_CONV },
   { " 4x8-convergence,", "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_CONV },
   { " 8x4-convergence,", "mem",  "-p",  "8", "-t",  "4", "-P",  "512", OPT_CONV },
   { " 8x4-convergence-NOTHP,",
                          "mem",  "-p",  "8", "-t",  "4", "-P",  "512", OPT_CONV_NOTHP },
   { " 3x1-convergence,", "mem",  "-p",  "3", "-t",  "1", "-P",  "512", OPT_CONV },
   { " 4x1-convergence,", "mem",  "-p",  "4", "-t",  "1", "-P",  "512", OPT_CONV },
   { " 8x1-convergence,", "mem",  "-p",  "8", "-t",  "1", "-P",  "512", OPT_CONV },
   { "16x1-convergence,", "mem",  "-p", "16", "-t",  "1", "-P",  "256", OPT_CONV },
   { "32x1-convergence,", "mem",  "-p", "32", "-t",  "1", "-P",  "128", OPT_CONV },

   /* Various NUMA process/thread layout bandwidth measurements: */
   { " 2x1-bw-process,",  "mem",  "-p",  "2", "-t",  "1", "-P", "1024", OPT_BW },
   { " 3x1-bw-process,",  "mem",  "-p",  "3", "-t",  "1", "-P", "1024", OPT_BW },
   { " 4x1-bw-process,",  "mem",  "-p",  "4", "-t",  "1", "-P", "1024", OPT_BW },
   { " 8x1-bw-process,",  "mem",  "-p",  "8", "-t",  "1", "-P", " 512", OPT_BW },
   { " 8x1-bw-process-NOTHP,",
                          "mem",  "-p",  "8", "-t",  "1", "-P", " 512", OPT_BW_NOTHP },
   { "16x1-bw-process,",  "mem",  "-p", "16", "-t",  "1", "-P",  "256", OPT_BW },

   { " 4x1-bw-thread,",   "mem",  "-p",  "1", "-t",  "4", "-T",  "256", OPT_BW },
   { " 8x1-bw-thread,",   "mem",  "-p",  "1", "-t",  "8", "-T",  "256", OPT_BW },
   { "16x1-bw-thread,",   "mem",  "-p",  "1", "-t", "16", "-T",  "128", OPT_BW },
   { "32x1-bw-thread,",   "mem",  "-p",  "1", "-t", "32", "-T",   "64", OPT_BW },

   { " 2x3-bw-thread,",   "mem",  "-p",  "2", "-t",  "3", "-P",  "512", OPT_BW },
   { " 4x4-bw-thread,",   "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_BW },
   { " 4x6-bw-thread,",   "mem",  "-p",  "4", "-t",  "6", "-P",  "512", OPT_BW },
   { " 4x8-bw-thread,",   "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW },
   { " 4x8-bw-thread-NOTHP,",
                          "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW_NOTHP },
   { " 3x3-bw-thread,",   "mem",  "-p",  "3", "-t",  "3", "-P",  "512", OPT_BW },
   { " 5x5-bw-thread,",   "mem",  "-p",  "5", "-t",  "5", "-P",  "512", OPT_BW },

   { "2x16-bw-thread,",   "mem",  "-p",  "2", "-t", "16", "-P",  "512", OPT_BW },
   { "1x32-bw-thread,",   "mem",  "-p",  "1", "-t", "32", "-P", "2048", OPT_BW },

   { "numa02-bw,",        "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW },
   { "numa02-bw-NOTHP,",  "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW_NOTHP },
   { "numa01-bw-thread,", "mem",  "-p",  "2", "-t", "16", "-T",  "192", OPT_BW },
   { "numa01-bw-thread-NOTHP,",
                          "mem",  "-p",  "2", "-t", "16", "-T",  "192", OPT_BW_NOTHP },
};
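
/*
 * Expanded example (cf. the forward reference above): the "RAM-bw-remote"
 * row is roughly equivalent to running:
 *
 *   perf bench numa mem -p 1 -t 1 -P 1024 -C 0 -M 1 -s 20 -zZq --thp 1 --no-data_rand_walk
 *
 * i.e. one process with one thread, pinned to CPU 0, with its 1024 MB
 * working set bound to node 1 (remote to CPU 0 on typical topologies).
 */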

static int bench_all(void)
{
        int nr = ARRAY_SIZE(tests);
        int ret;
        int i;

        ret = system("echo ' #'; echo ' # Running test on: '$(uname -a); echo ' #'");
        BUG_ON(ret < 0);

        /* Run every test; a failing test's return value is ignored, so the suite continues: */
        for (i = 0; i < nr; i++) {
                run_bench_numa(tests[i][0], tests[i] + 1);
        }

        printf("\n");

        return 0;
}

int bench_numa(int argc, const char **argv, const char *prefix __maybe_unused)
{
        init_params(&p0, "main,", argc, argv);
        argc = parse_options(argc, argv, options, bench_numa_usage, 0);
        if (argc)
                goto err;

        if (p0.run_all)
                return bench_all();

        if (__bench_numa(NULL))
                goto err;

        return 0;

err:
        usage_with_options(numa_usage, options);
        return -1;
}
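
/*
 * Usage sketch:
 *
 *   perf bench numa -a    # run the full built-in suite above
 *
 *   # the "RAM-bw-local-2x" entry, spelled out by hand:
 *   perf bench numa mem -p 2 -t 1 -P 1024 -C 0,2 -M 0x2 -s 20 -zZq --thp 1 --no-data_rand_walk
 */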