#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/mmdebug.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)

/*
 * Percpu allocator can serve percpu allocations before slab is
 * initialized which allows slab to depend on the percpu allocator.
 * The following two parameters decide how much resource to
 * preallocate for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or
 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
 */
#define PERCPU_DYNAMIC_EARLY_SLOTS	128
#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if arch is
 * manually allocating and mapping it for faster access (as a part of
 * large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
	size_t			static_size;
	size_t			reserved_size;
	size_t			dyn_size;
	size_t			unit_size;
	size_t			atom_size;
	size_t			alloc_size;
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,	/* shouldn't be used, only for range check */
};
extern const char * const pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif

extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool is_kernel_percpu_address(unsigned long addr);

#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif

extern void __init percpu_init_late(void);

extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#define alloc_percpu(type)	\
	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
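
/*
 * Illustrative sketch, not part of the original header: typical use of
 * the dynamic percpu allocator above.  The type and field names are
 * hypothetical.
 *
 *	struct my_stats { unsigned long nr_calls; };
 *	struct my_stats __percpu *stats;
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	stats = alloc_percpu(struct my_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *
 *	this_cpu_inc(stats->nr_calls);			(this CPU's copy)
 *
 *	for_each_possible_cpu(cpu)			(sum every copy)
 *		total += per_cpu_ptr(stats, cpu)->nr_calls;
 *
 *	free_percpu(stats);
 */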
/*
 * Branching function to split up a function into a set of functions that
 * are called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
static inline void __this_cpu_preempt_check(const char *op) { }
#endif

#define __pcpu_size_call_return(stem, variable)				\
({	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable);break;			\
	case 2: pscr_ret__ = stem##2(variable);break;			\
	case 4: pscr_ret__ = stem##4(variable);break;			\
	case 8: pscr_ret__ = stem##8(variable);break;			\
	default:							\
		__bad_size_call_parameter();break;			\
	}								\
	pscr_ret__;							\
})
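
/*
 * Illustrative note, not part of the original header: sizeof() is a
 * compile-time constant, so only the matching case of the switch above
 * survives optimization.  For a hypothetical 4-byte percpu variable
 *
 *	DEFINE_PER_CPU(int, nr_foo);
 *	int x = this_cpu_read(nr_foo);
 *
 * the read resolves to this_cpu_read_4(nr_foo).  An unsupported size
 * leaves a call to the undefined __bad_size_call_parameter() and thus
 * fails at link time.
 */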
#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

/*
 * Special handling for cmpxchg_double.  cmpxchg_double is passed two
 * percpu variables.  The first has to be aligned to a double word
 * boundary and the second has to follow directly thereafter.
 * We enforce this on all architectures even if they don't support
 * a double cmpxchg instruction, since it's a cheap requirement, and it
 * avoids breaking the requirement for architectures with the instruction.
 */
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
({									\
	bool pdcrb_ret__;						\
	__verify_pcpu_ptr(&pcp1);					\
	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));		\
	VM_BUG_ON((unsigned long)(&pcp2) !=				\
		  (unsigned long)(&pcp1) + sizeof(pcp1));		\
	switch(sizeof(pcp1)) {						\
	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pdcrb_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
		case 1: stem##1(variable, __VA_ARGS__);break;		\
		case 2: stem##2(variable, __VA_ARGS__);break;		\
		case 4: stem##4(variable, __VA_ARGS__);break;		\
		case 8: stem##8(variable, __VA_ARGS__);break;		\
		default:						\
			__bad_size_call_parameter();break;		\
	}								\
} while (0)

/*
 * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
 *
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor.  The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The first group is used for accesses that must be done in a
 * preemption safe way since we know that the context is not preempt
 * safe.  Interrupts may occur.  If the interrupt modifies the variable
 * too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized implementations by defining macros
 * for certain scalar sizes.  E.g. provide this_cpu_add_2() to provide per
 * cpu atomic operations for 2 byte sized RMW actions.  If arch code does
 * not provide operations for a scalar size then the fallback in the
 * generic code will be used.
 */
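
/*
 * Illustrative sketch, not part of the original header: a counter that
 * is safe to bump from any context using the operations below.  The
 * names are hypothetical.
 *
 *	static DEFINE_PER_CPU(unsigned long, nr_events);
 *
 *	void record_event(void)
 *	{
 *		this_cpu_inc(nr_events);
 *	}
 *
 * No explicit preempt_disable() is needed around the increment; the
 * operation itself guarantees the RMW happens on a single CPU's copy.
 */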
#define _this_cpu_generic_read(pcp)					\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = *this_cpu_ptr(&(pcp));					\
	preempt_enable();						\
	ret__;								\
})

# ifndef this_cpu_read_1
# define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
# define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
# define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
# define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))

#define _this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	*raw_cpu_ptr(&(pcp)) op val;					\
	raw_local_irq_restore(flags);					\
} while (0)

# ifndef this_cpu_write_1
# define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
# define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
# define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
# define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))

# ifndef this_cpu_add_1
# define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
# define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
# define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
# define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))

# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(typeof(pcp))(val))
# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)

# ifndef this_cpu_and_1
# define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
# define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
# define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
# define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))

# ifndef this_cpu_or_1
# define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
# define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
# define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
# define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))

#define _this_cpu_generic_add_return(pcp, val)				\
({									\
	typeof(pcp) ret__;						\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	raw_cpu_add(pcp, val);						\
	ret__ = raw_cpu_read(pcp);					\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

# ifndef this_cpu_add_return_1
# define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_2
# define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_4
# define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_8
# define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)

#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
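
/*
 * Illustrative sketch, not part of the original header: the *_return
 * variants read the new value back inside the same protected region,
 * which makes them suitable e.g. for per-cpu sequence numbers.  The
 * names are hypothetical.
 *
 *	static DEFINE_PER_CPU(u64, msg_seq);
 *
 *	u64 next_seq(void)
 *	{
 *		return this_cpu_inc_return(msg_seq);
 *	}
 */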
#define _this_cpu_generic_xchg(pcp, nval)				\
({	typeof(pcp) ret__;						\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	ret__ = raw_cpu_read(pcp);					\
	raw_cpu_write(pcp, nval);					\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

# ifndef this_cpu_xchg_1
# define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_2
# define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_4
# define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_8
# define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# define this_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)

#define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	ret__ = raw_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		raw_cpu_write(pcp, nval);				\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

# ifndef this_cpu_cmpxchg_1
# define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_2
# define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_4
# define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_8
# define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
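
/*
 * Illustrative sketch, not part of the original header: the usual
 * retry loop built on this_cpu_cmpxchg(), which returns the value it
 * found (success iff that equals the expected old value).  The names
 * are hypothetical.
 *
 *	static DEFINE_PER_CPU(int, budget);
 *
 *	bool take_budget(void)
 *	{
 *		int old = this_cpu_read(budget), prev;
 *
 *		for (;;) {
 *			if (old <= 0)
 *				return false;
 *			prev = this_cpu_cmpxchg(budget, old, old - 1);
 *			if (prev == old)
 *				return true;
 *			old = prev;	(raced with an interrupt, retry)
 *		}
 *	}
 */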
/*
 * cmpxchg_double replaces two adjacent scalars at once.  The first
 * two parameters are per cpu variables which have to be of the same
 * size.  A truth value is returned to indicate success or failure
 * (since a double register result is difficult to handle).  There is
 * very limited hardware support for these operations, so only certain
 * sizes may work.
 */
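
/*
 * Illustrative sketch, not part of the original header: a percpu pair
 * laid out to satisfy the placement rules enforced by
 * __pcpu_double_call_return_bool() above (first member aligned to
 * twice its size, second directly after it).  The names are
 * hypothetical; SLUB uses the same trick to pair a freelist pointer
 * with a transaction id.
 *
 *	struct pcpu_pair {
 *		void *ptr;
 *		unsigned long seq;
 *	} __aligned(2 * sizeof(void *));
 *
 *	static DEFINE_PER_CPU(struct pcpu_pair, my_pair);
 *
 *	ok = this_cpu_cmpxchg_double(my_pair.ptr, my_pair.seq,
 *				     old_ptr, old_seq,
 *				     new_ptr, old_seq + 1);
 */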
#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int ret__;							\
	unsigned long flags;						\
	raw_local_irq_save(flags);					\
	ret__ = raw_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
			oval1, oval2, nval1, nval2);			\
	raw_local_irq_restore(flags);					\
	ret__;								\
})

# ifndef this_cpu_cmpxchg_double_1
# define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_2
# define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_4
# define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_8
# define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))

/*
 * Generic percpu operations for contexts where we do not want to do
 * any checks for preemption.
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
 */
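
/*
 * Illustrative sketch, not part of the original header: raw_cpu ops
 * add no protection of their own, so the caller must have already
 * excluded rescheduling.  The variable name is hypothetical.
 *
 *	static DEFINE_PER_CPU(unsigned long, scratch);
 *
 *	preempt_disable();
 *	raw_cpu_add(scratch, 16);
 *	preempt_enable();
 *
 * If the variable can also be modified from interrupt context,
 * interrupts must be disabled instead, as described above.
 */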
# ifndef raw_cpu_read_1
# define raw_cpu_read_1(pcp)	(*raw_cpu_ptr(&(pcp)))
# endif
# ifndef raw_cpu_read_2
# define raw_cpu_read_2(pcp)	(*raw_cpu_ptr(&(pcp)))
# endif
# ifndef raw_cpu_read_4
# define raw_cpu_read_4(pcp)	(*raw_cpu_ptr(&(pcp)))
# endif
# ifndef raw_cpu_read_8
# define raw_cpu_read_8(pcp)	(*raw_cpu_ptr(&(pcp)))
# endif
# define raw_cpu_read(pcp)	__pcpu_size_call_return(raw_cpu_read_, (pcp))

#define raw_cpu_generic_to_op(pcp, val, op)				\
do {									\
	*raw_cpu_ptr(&(pcp)) op val;					\
} while (0)

# ifndef raw_cpu_write_1
# define raw_cpu_write_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef raw_cpu_write_2
# define raw_cpu_write_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef raw_cpu_write_4
# define raw_cpu_write_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef raw_cpu_write_8
# define raw_cpu_write_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), =)
# endif
# define raw_cpu_write(pcp, val)	__pcpu_size_call(raw_cpu_write_, (pcp), (val))

# ifndef raw_cpu_add_1
# define raw_cpu_add_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef raw_cpu_add_2
# define raw_cpu_add_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef raw_cpu_add_4
# define raw_cpu_add_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef raw_cpu_add_8
# define raw_cpu_add_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), +=)
# endif
# define raw_cpu_add(pcp, val)	__pcpu_size_call(raw_cpu_add_, (pcp), (val))

# define raw_cpu_sub(pcp, val)	raw_cpu_add((pcp), -(val))

# define raw_cpu_inc(pcp)	raw_cpu_add((pcp), 1)

# define raw_cpu_dec(pcp)	raw_cpu_sub((pcp), 1)

# ifndef raw_cpu_and_1
# define raw_cpu_and_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef raw_cpu_and_2
# define raw_cpu_and_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef raw_cpu_and_4
# define raw_cpu_and_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef raw_cpu_and_8
# define raw_cpu_and_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), &=)
# endif
# define raw_cpu_and(pcp, val)	__pcpu_size_call(raw_cpu_and_, (pcp), (val))

# ifndef raw_cpu_or_1
# define raw_cpu_or_1(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef raw_cpu_or_2
# define raw_cpu_or_2(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef raw_cpu_or_4
# define raw_cpu_or_4(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef raw_cpu_or_8
# define raw_cpu_or_8(pcp, val)	raw_cpu_generic_to_op((pcp), (val), |=)
# endif
# define raw_cpu_or(pcp, val)	__pcpu_size_call(raw_cpu_or_, (pcp), (val))

#define raw_cpu_generic_add_return(pcp, val)				\
({									\
	raw_cpu_add(pcp, val);						\
	raw_cpu_read(pcp);						\
})

# ifndef raw_cpu_add_return_1
# define raw_cpu_add_return_1(pcp, val)	raw_cpu_generic_add_return(pcp, val)
# endif
# ifndef raw_cpu_add_return_2
# define raw_cpu_add_return_2(pcp, val)	raw_cpu_generic_add_return(pcp, val)
# endif
# ifndef raw_cpu_add_return_4
# define raw_cpu_add_return_4(pcp, val)	raw_cpu_generic_add_return(pcp, val)
# endif
# ifndef raw_cpu_add_return_8
# define raw_cpu_add_return_8(pcp, val)	raw_cpu_generic_add_return(pcp, val)
# endif
# define raw_cpu_add_return(pcp, val)	\
	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)

#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
#define raw_cpu_inc_return(pcp)	raw_cpu_add_return(pcp, 1)
#define raw_cpu_dec_return(pcp)	raw_cpu_add_return(pcp, -1)

#define raw_cpu_generic_xchg(pcp, nval)					\
({	typeof(pcp) ret__;						\
	ret__ = raw_cpu_read(pcp);					\
	raw_cpu_write(pcp, nval);					\
	ret__;								\
})

# ifndef raw_cpu_xchg_1
# define raw_cpu_xchg_1(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
# endif
# ifndef raw_cpu_xchg_2
# define raw_cpu_xchg_2(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
# endif
# ifndef raw_cpu_xchg_4
# define raw_cpu_xchg_4(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
# endif
# ifndef raw_cpu_xchg_8
# define raw_cpu_xchg_8(pcp, nval)	raw_cpu_generic_xchg(pcp, nval)
# endif
# define raw_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)

#define raw_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	ret__ = raw_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		raw_cpu_write(pcp, nval);				\
	ret__;								\
})

# ifndef raw_cpu_cmpxchg_1
# define raw_cpu_cmpxchg_1(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef raw_cpu_cmpxchg_2
# define raw_cpu_cmpxchg_2(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef raw_cpu_cmpxchg_4
# define raw_cpu_cmpxchg_4(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef raw_cpu_cmpxchg_8
# define raw_cpu_cmpxchg_8(pcp, oval, nval)	raw_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define raw_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)

#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int __ret = 0;							\
	if (raw_cpu_read(pcp1) == (oval1) &&				\
			raw_cpu_read(pcp2) == (oval2)) {		\
		raw_cpu_write(pcp1, (nval1));				\
		raw_cpu_write(pcp2, (nval2));				\
		__ret = 1;						\
	}								\
	(__ret);							\
})

# ifndef raw_cpu_cmpxchg_double_1
# define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef raw_cpu_cmpxchg_double_2
# define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef raw_cpu_cmpxchg_double_4
# define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef raw_cpu_cmpxchg_double_8
# define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))

/*
 * Generic percpu operations for contexts that are safe from preemption/interrupts.
 */
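
/*
 * Illustrative sketch, not part of the original header: __this_cpu ops
 * behave like the raw ops but, under CONFIG_DEBUG_PREEMPT, verify that
 * the context really is non-preemptible.  The names are hypothetical.
 *
 *	static DEFINE_PER_CPU(unsigned long, nr_irqs_seen);
 *
 *	irqreturn_t my_handler(int irq, void *dev)
 *	{
 *		__this_cpu_inc(nr_irqs_seen);	(interrupts are off here)
 *		return IRQ_HANDLED;
 *	}
 */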
# define __this_cpu_read(pcp)						\
	(__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp)))

# define __this_cpu_write(pcp, val)					\
do { __this_cpu_preempt_check("write");					\
     __pcpu_size_call(raw_cpu_write_, (pcp), (val));			\
} while (0)

# define __this_cpu_add(pcp, val)					\
do { __this_cpu_preempt_check("add");					\
     __pcpu_size_call(raw_cpu_add_, (pcp), (val));			\
} while (0)

# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(typeof(pcp))(val))
# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)

# define __this_cpu_and(pcp, val)					\
do { __this_cpu_preempt_check("and");					\
     __pcpu_size_call(raw_cpu_and_, (pcp), (val));			\
} while (0)

# define __this_cpu_or(pcp, val)					\
do { __this_cpu_preempt_check("or");					\
     __pcpu_size_call(raw_cpu_or_, (pcp), (val));			\
} while (0)

# define __this_cpu_add_return(pcp, val)				\
	(__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val))

#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)

# define __this_cpu_xchg(pcp, nval)					\
	(__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval))

# define __this_cpu_cmpxchg(pcp, oval, nval)				\
	(__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval))

# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	(__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)))

#endif /* __LINUX_PERCPU_H */