dma-buf/fence: Fix a kerneldoc warning
[cascardo/linux.git] drivers/dma-buf/fence.c
/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/fence.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fence.h>

EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(fence_emit);

/*
 * fence context counter: each execution context should have its own
 * fence context; this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic_t fence_context_counter = ATOMIC_INIT(0);

/**
 * fence_context_alloc - allocate an array of fence contexts
 * @num:        [in]    number of contexts to allocate
 *
 * This function will return the first index of the number of contexts
 * allocated. The fence context is used for setting fence->context to a
 * unique number.
 */
unsigned fence_context_alloc(unsigned num)
{
        BUG_ON(!num);
        return atomic_add_return(num, &fence_context_counter) - num;
}
EXPORT_SYMBOL(fence_context_alloc);

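/*
 * Illustrative sketch only (not part of this file): a driver would
 * typically call fence_context_alloc() once at init time, reserving one
 * context per independently executing engine. "my_dev", "num_rings" and
 * "ring" are hypothetical driver state.
 *
 *      my_dev->context_base = fence_context_alloc(my_dev->num_rings);
 *      for (i = 0; i < my_dev->num_rings; i++)
 *              my_dev->ring[i].fence_context = my_dev->context_base + i;
 */
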
/**
 * fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence. This will unblock
 * fence_wait() calls and run all the callbacks added with
 * fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 *
 * Unlike fence_signal(), this function must be called with fence->lock held.
 */
int fence_signal_locked(struct fence *fence)
{
        struct fence_cb *cur, *tmp;
        int ret = 0;

        if (WARN_ON(!fence))
                return -EINVAL;

        if (!ktime_to_ns(fence->timestamp)) {
                fence->timestamp = ktime_get();
                smp_mb__before_atomic();
        }

        if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                ret = -EINVAL;

                /*
                 * we might have raced with the unlocked fence_signal(),
                 * still run through all callbacks
                 */
        } else
                trace_fence_signaled(fence);

        list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                list_del_init(&cur->node);
                cur->func(fence, cur);
        }
        return ret;
}
EXPORT_SYMBOL(fence_signal_locked);

/**
 * fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence. This will unblock
 * fence_wait() calls and run all the callbacks added with
 * fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 */
int fence_signal(struct fence *fence)
{
        unsigned long flags;

        if (!fence)
                return -EINVAL;

        if (!ktime_to_ns(fence->timestamp)) {
                fence->timestamp = ktime_get();
                smp_mb__before_atomic();
        }

        if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return -EINVAL;

        trace_fence_signaled(fence);

        if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
                struct fence_cb *cur, *tmp;

                spin_lock_irqsave(fence->lock, flags);
                list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
                        list_del_init(&cur->node);
                        cur->func(fence, cur);
                }
                spin_unlock_irqrestore(fence->lock, flags);
        }
        return 0;
}
EXPORT_SYMBOL(fence_signal);

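/*
 * Illustrative sketch only: fence_signal() is usually called from the
 * driver's completion path, e.g. a hardware interrupt handler. "my_dev"
 * and "my_dev_pop_finished_fence" are hypothetical.
 *
 *      static irqreturn_t my_ring_irq(int irq, void *data)
 *      {
 *              struct my_dev *my_dev = data;
 *              struct fence *fence = my_dev_pop_finished_fence(my_dev);
 *
 *              if (fence)
 *                      fence_signal(fence);
 *              return IRQ_HANDLED;
 *      }
 */
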
/**
 * fence_wait_timeout - sleep until the fence gets signaled or until timeout elapses
 * @fence:      [in]    the fence to wait on
 * @intr:       [in]    if true, do an interruptible wait
 * @timeout:    [in]    timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 */
signed long
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
{
        signed long ret;

        if (WARN_ON(timeout < 0))
                return -EINVAL;

        trace_fence_wait_start(fence);
        ret = fence->ops->wait(fence, intr, timeout);
        trace_fence_wait_end(fence);
        return ret;
}
EXPORT_SYMBOL(fence_wait_timeout);

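/*
 * Illustrative sketch only: the caller must hold a reference across the
 * wait; 0 means the wait timed out, a negative value is an error, and a
 * positive value is the remaining timeout in jiffies. "my_fence" is
 * assumed to have been obtained (and referenced) elsewhere.
 *
 *      ret = fence_wait_timeout(my_fence, true, msecs_to_jiffies(100));
 *      if (ret == 0)
 *              return -ETIMEDOUT;
 *      else if (ret < 0)
 *              return ret;
 */
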
/**
 * fence_release - default release function for fences
 * @kref:       [in]    fence refcount
 *
 * This is the default release function: it frees the fence through the
 * fence_ops.release callback if one is provided, else through fence_free().
 */
void fence_release(struct kref *kref)
{
        struct fence *fence =
                        container_of(kref, struct fence, refcount);

        trace_fence_destroy(fence);

        BUG_ON(!list_empty(&fence->cb_list));

        if (fence->ops->release)
                fence->ops->release(fence);
        else
                fence_free(fence);
}
EXPORT_SYMBOL(fence_release);

/**
 * fence_free - default release function for fence memory
 * @fence:      [in]    fence to free
 *
 * Frees the fence memory after an RCU grace period, so concurrent RCU
 * readers can still safely dereference it.
 */
void fence_free(struct fence *fence)
{
        kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);

/**
 * fence_enable_sw_signaling - enable signaling on fence
 * @fence:      [in]    the fence to enable
 *
 * This will request that software signaling be enabled, to make the fence
 * complete as soon as possible.
 */
void fence_enable_sw_signaling(struct fence *fence)
{
        unsigned long flags;

        if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
            !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                trace_fence_enable_signal(fence);

                spin_lock_irqsave(fence->lock, flags);

                if (!fence->ops->enable_signaling(fence))
                        fence_signal_locked(fence);

                spin_unlock_irqrestore(fence->lock, flags);
        }
}
EXPORT_SYMBOL(fence_enable_sw_signaling);

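/*
 * Illustrative sketch only: fence_add_callback() and fence_default_wait()
 * already enable signaling implicitly, so an explicit call is mainly
 * useful to push a polling driver toward completing a fence early, before
 * anyone blocks on it. "my_fence" is hypothetical.
 *
 *      fence_enable_sw_signaling(my_fence);
 */
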
/**
 * fence_add_callback - add a callback to be called when the fence is signaled
 * @fence:      [in]    the fence to wait on
 * @cb:         [in]    the callback to register
 * @func:       [in]    the function to call
 *
 * cb will be initialized by fence_add_callback, no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * Note that the callback can be called from an atomic context. If
 * fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 *
 * Add a software callback to the fence. The same refcount restrictions
 * apply as with fence_wait(), however the caller doesn't need to
 * keep a refcount to fence afterwards: when software access is enabled,
 * the creator of the fence is required to keep the fence alive until
 * after it signals with fence_signal(). The callback itself can be called
 * from irq context.
 */
int fence_add_callback(struct fence *fence, struct fence_cb *cb,
                       fence_func_t func)
{
        unsigned long flags;
        int ret = 0;
        bool was_set;

        if (WARN_ON(!fence || !func))
                return -EINVAL;

        if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                INIT_LIST_HEAD(&cb->node);
                return -ENOENT;
        }

        spin_lock_irqsave(fence->lock, flags);

        was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

        if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                ret = -ENOENT;
        else if (!was_set) {
                trace_fence_enable_signal(fence);

                if (!fence->ops->enable_signaling(fence)) {
                        fence_signal_locked(fence);
                        ret = -ENOENT;
                }
        }

        if (!ret) {
                cb->func = func;
                list_add_tail(&cb->node, &fence->cb_list);
        } else
                INIT_LIST_HEAD(&cb->node);
        spin_unlock_irqrestore(fence->lock, flags);

        return ret;
}
EXPORT_SYMBOL(fence_add_callback);

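/*
 * Illustrative sketch only: the callback runs in atomic (possibly irq)
 * context, so it typically just wakes a waiter or schedules deferred
 * work; an -ENOENT return means the fence already signaled, so the
 * caller completes synchronously. "my_job", "finish_work" and
 * "my_job_finish" are hypothetical.
 *
 *      static void my_job_done_cb(struct fence *fence, struct fence_cb *cb)
 *      {
 *              struct my_job *job = container_of(cb, struct my_job, cb);
 *
 *              schedule_work(&job->finish_work);
 *      }
 *
 *      if (fence_add_callback(fence, &job->cb, my_job_done_cb) == -ENOENT)
 *              my_job_finish(job);
 */
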
/**
 * fence_remove_callback - remove a callback from the signaling list
 * @fence:      [in]    the fence to wait on
 * @cb:         [in]    the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 */
bool
fence_remove_callback(struct fence *fence, struct fence_cb *cb)
{
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(fence->lock, flags);

        ret = !list_empty(&cb->node);
        if (ret)
                list_del_init(&cb->node);

        spin_unlock_irqrestore(fence->lock, flags);

        return ret;
}
EXPORT_SYMBOL(fence_remove_callback);

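/*
 * Illustrative sketch only: a lockup-recovery path that registered a
 * callback may tear down the containing object only if the callback was
 * actually removed; a false return means the fence signaled and the
 * callback (here the hypothetical my_job_done_cb above) now owns
 * completion.
 *
 *      if (fence_remove_callback(fence, &job->cb))
 *              my_job_abort(job);
 */
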
struct default_wait_cb {
        struct fence_cb base;
        struct task_struct *task;
};

static void
fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
{
        struct default_wait_cb *wait =
                container_of(cb, struct default_wait_cb, base);

        wake_up_state(wait->task, TASK_NORMAL);
}

/**
 * fence_default_wait - default sleep until the fence gets signaled or until timeout elapses
 * @fence:      [in]    the fence to wait on
 * @intr:       [in]    if true, do an interruptible wait
 * @timeout:    [in]    timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success.
 */
signed long
fence_default_wait(struct fence *fence, bool intr, signed long timeout)
{
        struct default_wait_cb cb;
        unsigned long flags;
        signed long ret = timeout;
        bool was_set;

        if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return timeout;

        spin_lock_irqsave(fence->lock, flags);

        if (intr && signal_pending(current)) {
                ret = -ERESTARTSYS;
                goto out;
        }

        was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

        if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                goto out;

        if (!was_set) {
                trace_fence_enable_signal(fence);

                if (!fence->ops->enable_signaling(fence)) {
                        fence_signal_locked(fence);
                        goto out;
                }
        }

        cb.base.func = fence_default_wait_cb;
        cb.task = current;
        list_add(&cb.base.node, &fence->cb_list);

        while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
                if (intr)
                        __set_current_state(TASK_INTERRUPTIBLE);
                else
                        __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock_irqrestore(fence->lock, flags);

                ret = schedule_timeout(ret);

                spin_lock_irqsave(fence->lock, flags);
                if (ret > 0 && intr && signal_pending(current))
                        ret = -ERESTARTSYS;
        }

        if (!list_empty(&cb.base.node))
                list_del(&cb.base.node);
        __set_current_state(TASK_RUNNING);

out:
        spin_unlock_irqrestore(fence->lock, flags);
        return ret;
}
EXPORT_SYMBOL(fence_default_wait);

/**
 * fence_init - Initialize a custom fence.
 * @fence:      [in]    the fence to initialize
 * @ops:        [in]    the fence_ops for operations on this fence
 * @lock:       [in]    the irqsafe spinlock to use for locking this fence
 * @context:    [in]    the execution context this fence is run on
 * @seqno:      [in]    a linearly increasing sequence number for this context
 *
 * Initializes an allocated fence. The caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if fence_ops.enable_signaling gets called. This can
 * be used for implementing other types of fences.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * one to check which fence is later by simply using fence_later().
 */
void
fence_init(struct fence *fence, const struct fence_ops *ops,
             spinlock_t *lock, unsigned context, unsigned seqno)
{
        BUG_ON(!lock);
        BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
               !ops->get_driver_name || !ops->get_timeline_name);

        kref_init(&fence->refcount);
        fence->ops = ops;
        INIT_LIST_HEAD(&fence->cb_list);
        fence->lock = lock;
        fence->context = context;
        fence->seqno = seqno;
        fence->flags = 0UL;

        trace_fence_init(fence);
}
EXPORT_SYMBOL(fence_init);
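
/*
 * Illustrative sketch only of a minimal driver-side fence: everything
 * prefixed "my_" is hypothetical. ->wait, ->enable_signaling,
 * ->get_driver_name and ->get_timeline_name are the mandatory ops
 * checked by the BUG_ON in fence_init() above.
 *
 *      static const char *my_get_driver_name(struct fence *fence)
 *      {
 *              return "my_driver";
 *      }
 *
 *      static const struct fence_ops my_fence_ops = {
 *              .get_driver_name   = my_get_driver_name,
 *              .get_timeline_name = my_get_timeline_name,
 *              .enable_signaling  = my_enable_signaling,
 *              .wait              = fence_default_wait,
 *      };
 *
 *      fence_init(&my_fence->base, &my_fence_ops, &my_dev->fence_lock,
 *                 my_dev->ring[i].fence_context, ++my_dev->ring[i].seqno);
 */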