lib/ovs-atomic-gcc4+.h
/*
 * Copyright (c) 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This header implements atomic operation primitives on GCC 4.x. */
#ifndef IN_OVS_ATOMIC_H
#error "This header should only be included indirectly via ovs-atomic.h."
#endif

#include "ovs-atomic-locked.h"
#define OVS_ATOMIC_GCC4P_IMPL 1

#define ATOMIC(TYPE) TYPE
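
/* Lock-free property macros, following the C11 convention: 2 means the type
 * is always lock-free, 0 means it never is.  LONG and LLONG report lock-free
 * only when the type fits in a pointer-sized word; wider objects use the
 * mutex-based fallback from ovs-atomic-locked.h. */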
#define ATOMIC_BOOL_LOCK_FREE 2
#define ATOMIC_CHAR_LOCK_FREE 2
#define ATOMIC_SHORT_LOCK_FREE 2
#define ATOMIC_INT_LOCK_FREE 2
#define ATOMIC_LONG_LOCK_FREE (ULONG_MAX <= UINTPTR_MAX ? 2 : 0)
#define ATOMIC_LLONG_LOCK_FREE (ULLONG_MAX <= UINTPTR_MAX ? 2 : 0)
#define ATOMIC_POINTER_LOCK_FREE 2

typedef enum {
    memory_order_relaxed,
    memory_order_consume,
    memory_order_acquire,
    memory_order_release,
    memory_order_acq_rel,
    memory_order_seq_cst
} memory_order;
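
/* These mirror C11's memory_order values.  Most of the __sync builtins used
 * below act as full barriers, so the weaker orders here mainly determine
 * whether a full barrier is emitted at all (see the fence helpers below). */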
\f
#define IS_LOCKLESS_ATOMIC(OBJECT) (sizeof(OBJECT) <= sizeof(void *))
\f
#define ATOMIC_VAR_INIT(VALUE) VALUE
#define atomic_init(OBJECT, VALUE) (*(OBJECT) = (VALUE), (void) 0)
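
/* Usage sketch (an illustrative example, not part of the original header;
 * the variable names are hypothetical):
 *
 *     static ATOMIC(unsigned int) count = ATOMIC_VAR_INIT(0);
 *
 *     ATOMIC(unsigned int) other;
 *     atomic_init(&other, 0);
 */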

static inline void
atomic_thread_fence(memory_order order)
{
    if (order != memory_order_relaxed) {
        __sync_synchronize();
    }
}

static inline void
atomic_thread_fence_if_seq_cst(memory_order order)
{
    if (order == memory_order_seq_cst) {
        __sync_synchronize();
    }
}

static inline void
atomic_signal_fence(memory_order order)
{
    if (order != memory_order_relaxed) {
        asm volatile("" : : : "memory");
    }
}
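
/* The three helpers above supply the fences used in this file:
 * atomic_thread_fence() emits a full barrier (__sync_synchronize()) for any
 * order other than memory_order_relaxed, atomic_thread_fence_if_seq_cst()
 * emits one only for memory_order_seq_cst, and atomic_signal_fence() is a
 * compiler-only barrier. */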

#define atomic_is_lock_free(OBJ)                \
    ((void) *(OBJ),                             \
     IS_LOCKLESS_ATOMIC(*(OBJ)) ? 2 : 0)
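
/* Store and read.  Objects that satisfy IS_LOCKLESS_ATOMIC() are accessed
 * through a volatile-qualified pointer, bracketed by the fences above;
 * larger objects fall back to the mutex-protected helpers from
 * ovs-atomic-locked.h. */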
#define atomic_store(DST, SRC) \
    atomic_store_explicit(DST, SRC, memory_order_seq_cst)
#define atomic_store_explicit(DST, SRC, ORDER)          \
    ({                                                  \
        typeof(DST) dst__ = (DST);                      \
        typeof(SRC) src__ = (SRC);                      \
                                                        \
        if (IS_LOCKLESS_ATOMIC(*dst__)) {               \
            atomic_thread_fence(ORDER);                 \
            *(typeof(*(DST)) volatile *)dst__ = src__;  \
            atomic_thread_fence_if_seq_cst(ORDER);      \
        } else {                                        \
            atomic_store_locked(dst__, src__);          \
        }                                               \
        (void) 0;                                       \
    })
#define atomic_read(SRC, DST) \
    atomic_read_explicit(SRC, DST, memory_order_seq_cst)
#define atomic_read_explicit(SRC, DST, ORDER)           \
    ({                                                  \
        typeof(DST) dst__ = (DST);                      \
        typeof(SRC) src__ = (SRC);                      \
                                                        \
        if (IS_LOCKLESS_ATOMIC(*src__)) {               \
            atomic_thread_fence_if_seq_cst(ORDER);      \
            *dst__ = *(typeof(*(SRC)) volatile *)src__; \
        } else {                                        \
            atomic_read_locked(src__, dst__);           \
        }                                               \
        (void) 0;                                       \
    })
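
/* Usage sketch (an illustrative example; "x" and "value" are hypothetical
 * names):
 *
 *     static ATOMIC(int) x;
 *     int value;
 *
 *     atomic_store(&x, 42);
 *     atomic_read(&x, &value);            (afterward, value == 42)
 *     atomic_read_explicit(&x, &value, memory_order_relaxed);
 */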

#define atomic_compare_exchange_strong(DST, EXP, SRC)   \
    ({                                                  \
        typeof(DST) dst__ = (DST);                      \
        typeof(EXP) expp__ = (EXP);                     \
        typeof(SRC) src__ = (SRC);                      \
        typeof(SRC) exp__ = *expp__;                    \
        typeof(SRC) ret__;                              \
                                                        \
        ret__ = __sync_val_compare_and_swap(dst__, exp__, src__); \
        if (ret__ != exp__) {                                     \
            *expp__ = ret__;                                      \
        }                                                         \
        ret__ == exp__;                                           \
    })
#define atomic_compare_exchange_strong_explicit(DST, EXP, SRC, ORD1, ORD2) \
    ((void) (ORD1), (void) (ORD2), \
     atomic_compare_exchange_strong(DST, EXP, SRC))
#define atomic_compare_exchange_weak            \
    atomic_compare_exchange_strong
#define atomic_compare_exchange_weak_explicit   \
    atomic_compare_exchange_strong_explicit
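
/* atomic_compare_exchange_strong() above yields true if the swap happened;
 * on failure it updates *EXP to the value actually found in *DST.
 * __sync_val_compare_and_swap() is a full barrier, so the _explicit variants
 * simply discard their memory order arguments.
 *
 * Usage sketch of a typical retry loop (illustrative; "x", "expected" and
 * "desired" are hypothetical names):
 *
 *     static ATOMIC(int) x;
 *     int expected, desired;
 *
 *     atomic_read(&x, &expected);
 *     do {
 *         desired = expected + 1;
 *     } while (!atomic_compare_exchange_strong(&x, &expected, desired));
 */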

#define atomic_op__(RMW, OP, ARG, ORIG)                     \
    ({                                                      \
        typeof(RMW) rmw__ = (RMW);                          \
        typeof(ARG) arg__ = (ARG);                          \
        typeof(ORIG) orig__ = (ORIG);                       \
                                                            \
        if (IS_LOCKLESS_ATOMIC(*rmw__)) {                   \
            *orig__ = __sync_fetch_and_##OP(rmw__, arg__);  \
        } else {                                            \
            atomic_op_locked(rmw__, OP, arg__, orig__);     \
        }                                                   \
        (void) 0;                                           \
    })
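
/* atomic_op__() dispatches to the matching __sync_fetch_and_<OP>() builtin
 * for lockless objects and to atomic_op_locked() otherwise; either way,
 * *ORIG receives the value the object held before the operation. */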

#define atomic_add(RMW, ARG, ORIG) atomic_op__(RMW, add, ARG, ORIG)
#define atomic_sub(RMW, ARG, ORIG) atomic_op__(RMW, sub, ARG, ORIG)
#define atomic_or(RMW, ARG, ORIG) atomic_op__(RMW, or,  ARG, ORIG)
#define atomic_xor(RMW, ARG, ORIG) atomic_op__(RMW, xor, ARG, ORIG)
#define atomic_and(RMW, ARG, ORIG) atomic_op__(RMW, and, ARG, ORIG)

#define atomic_add_explicit(RMW, OPERAND, ORIG, ORDER)  \
    ((void) (ORDER), atomic_add(RMW, OPERAND, ORIG))
#define atomic_sub_explicit(RMW, OPERAND, ORIG, ORDER)  \
    ((void) (ORDER), atomic_sub(RMW, OPERAND, ORIG))
#define atomic_or_explicit(RMW, OPERAND, ORIG, ORDER)   \
    ((void) (ORDER), atomic_or(RMW, OPERAND, ORIG))
#define atomic_xor_explicit(RMW, OPERAND, ORIG, ORDER)  \
    ((void) (ORDER), atomic_xor(RMW, OPERAND, ORIG))
#define atomic_and_explicit(RMW, OPERAND, ORIG, ORDER)  \
    ((void) (ORDER), atomic_and(RMW, OPERAND, ORIG))
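
/* Usage sketch (illustrative; "refcount" and "orig" are hypothetical names).
 * In each call, "orig" receives the value held before the operation:
 *
 *     static ATOMIC(unsigned int) refcount = ATOMIC_VAR_INIT(1);
 *     unsigned int orig;
 *
 *     atomic_add(&refcount, 1, &orig);
 *     atomic_sub(&refcount, 1, &orig);
 */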
\f
/* atomic_flag */

typedef struct {
    int b;
} atomic_flag;
#define ATOMIC_FLAG_INIT { false }

static inline bool
atomic_flag_test_and_set_explicit(volatile atomic_flag *object,
                                  memory_order order)
{
    bool old;

    /* __sync_lock_test_and_set() is itself an acquire barrier.
     * Any stronger ordering requires an additional barrier. */
    if (order > memory_order_acquire) {
        atomic_thread_fence(order);
    }
    old = __sync_lock_test_and_set(&object->b, 1);
    atomic_thread_fence_if_seq_cst(order);

    return old;
}

#define atomic_flag_test_and_set(FLAG)                                  \
    atomic_flag_test_and_set_explicit(FLAG, memory_order_seq_cst)

static inline void
atomic_flag_clear_explicit(volatile atomic_flag *object,
                           memory_order order)
{
    /* __sync_lock_release() is itself a release barrier.  For any other
     * ordering an additional barrier may be needed. */
    if (order != memory_order_release) {
        atomic_thread_fence(order);
    }
    __sync_lock_release(&object->b);
    atomic_thread_fence_if_seq_cst(order);
}

#define atomic_flag_clear(FLAG)                                 \
    atomic_flag_clear_explicit(FLAG, memory_order_seq_cst)
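
/* Usage sketch (illustrative; "flag" is a hypothetical name): atomic_flag
 * used as a simple test-and-set lock.
 *
 *     static atomic_flag flag = ATOMIC_FLAG_INIT;
 *
 *     while (atomic_flag_test_and_set(&flag)) {
 *         continue;               (spin: the flag was already set)
 *     }
 *     ...critical section...
 *     atomic_flag_clear(&flag);
 */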