x86/smpboot: Init apic mapping before usage
[cascardo/linux.git] / kernel / bpf / percpu_freelist.c
1 /* Copyright (c) 2016 Facebook
2  *
3  * This program is free software; you can redistribute it and/or
4  * modify it under the terms of version 2 of the GNU General Public
5  * License as published by the Free Software Foundation.
6  */
7 #include "percpu_freelist.h"
8
9 int pcpu_freelist_init(struct pcpu_freelist *s)
10 {
11         int cpu;
12
13         s->freelist = alloc_percpu(struct pcpu_freelist_head);
14         if (!s->freelist)
15                 return -ENOMEM;
16
17         for_each_possible_cpu(cpu) {
18                 struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);
19
20                 raw_spin_lock_init(&head->lock);
21                 head->first = NULL;
22         }
23         return 0;
24 }
25
/* Free the per-CPU list heads allocated by pcpu_freelist_init().
 * Only the percpu head array is released here; the list elements
 * themselves are not touched (they live in caller-provided storage,
 * see pcpu_freelist_populate()).
 */
void pcpu_freelist_destroy(struct pcpu_freelist *s)
{
	free_percpu(s->freelist);
}
30
31 static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
32                                         struct pcpu_freelist_node *node)
33 {
34         raw_spin_lock(&head->lock);
35         node->next = head->first;
36         head->first = node;
37         raw_spin_unlock(&head->lock);
38 }
39
40 void pcpu_freelist_push(struct pcpu_freelist *s,
41                         struct pcpu_freelist_node *node)
42 {
43         struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
44
45         __pcpu_freelist_push(head, node);
46 }
47
48 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
49                             u32 nr_elems)
50 {
51         struct pcpu_freelist_head *head;
52         unsigned long flags;
53         int i, cpu, pcpu_entries;
54
55         pcpu_entries = nr_elems / num_possible_cpus() + 1;
56         i = 0;
57
58         /* disable irq to workaround lockdep false positive
59          * in bpf usage pcpu_freelist_populate() will never race
60          * with pcpu_freelist_push()
61          */
62         local_irq_save(flags);
63         for_each_possible_cpu(cpu) {
64 again:
65                 head = per_cpu_ptr(s->freelist, cpu);
66                 __pcpu_freelist_push(head, buf);
67                 i++;
68                 buf += elem_size;
69                 if (i == nr_elems)
70                         break;
71                 if (i % pcpu_entries)
72                         goto again;
73         }
74         local_irq_restore(flags);
75 }
76
77 struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
78 {
79         struct pcpu_freelist_head *head;
80         struct pcpu_freelist_node *node;
81         int orig_cpu, cpu;
82
83         orig_cpu = cpu = raw_smp_processor_id();
84         while (1) {
85                 head = per_cpu_ptr(s->freelist, cpu);
86                 raw_spin_lock(&head->lock);
87                 node = head->first;
88                 if (node) {
89                         head->first = node->next;
90                         raw_spin_unlock(&head->lock);
91                         return node;
92                 }
93                 raw_spin_unlock(&head->lock);
94                 cpu = cpumask_next(cpu, cpu_possible_mask);
95                 if (cpu >= nr_cpu_ids)
96                         cpu = 0;
97                 if (cpu == orig_cpu)
98                         return NULL;
99         }
100 }