drm/nouveau/nouveau: Do not BUG_ON(!spin_is_locked()) on UP
drivers/gpu/drm/nouveau/core/core/notify.c
/*
 * Copyright 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */

#include <core/client.h>
#include <core/event.h>
#include <core/notify.h>

#include <nvif/unpack.h>
#include <nvif/event.h>
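/*
 * Block delivery to this notifier.  The first blocker drops the event
 * source's interest in notify->types/notify->index via nvkm_event_put().
 * Must be called with event->refs_lock held.
 */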
static inline void
nvkm_notify_put_locked(struct nvkm_notify *notify)
{
        if (notify->block++ == 0)
                nvkm_event_put(notify->event, notify->types, notify->index);
}
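/*
 * Disable a notifier on behalf of its user: consume the USER enable bit,
 * block delivery and, for work-queue notifiers, flush any callback that
 * is still in flight so it cannot run after this returns.
 */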
void
nvkm_notify_put(struct nvkm_notify *notify)
{
        struct nvkm_event *event = notify->event;
        unsigned long flags;
        if (likely(event) &&
            test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
                spin_lock_irqsave(&event->refs_lock, flags);
                nvkm_notify_put_locked(notify);
                spin_unlock_irqrestore(&event->refs_lock, flags);
                if (test_bit(NVKM_NOTIFY_WORK, &notify->flags))
                        flush_work(&notify->work);
        }
}
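/*
 * Unblock delivery to this notifier.  When the last blocker goes away,
 * nvkm_event_get() re-registers interest in notify->types/notify->index.
 * Must be called with event->refs_lock held.
 */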
static inline void
nvkm_notify_get_locked(struct nvkm_notify *notify)
{
        if (--notify->block == 0)
                nvkm_event_get(notify->event, notify->types, notify->index);
}
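/*
 * Enable a notifier on behalf of its user: set the USER enable bit (a
 * no-op if it was already set) and drop one block reference under
 * event->refs_lock.
 */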
void
nvkm_notify_get(struct nvkm_notify *notify)
{
        struct nvkm_event *event = notify->event;
        unsigned long flags;
        if (likely(event) &&
            !test_and_set_bit(NVKM_NOTIFY_USER, &notify->flags)) {
                spin_lock_irqsave(&event->refs_lock, flags);
                nvkm_notify_get_locked(notify);
                spin_unlock_irqrestore(&event->refs_lock, flags);
        }
}
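/*
 * Run the user's callback.  nvkm_notify_send() blocked the notifier
 * before delivery, so it is re-armed here unless the callback asked to
 * drop it while the USER bit was still set; in that case the bit is
 * consumed instead and the notifier stays disabled until the next
 * nvkm_notify_get().
 */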
static inline void
nvkm_notify_func(struct nvkm_notify *notify)
{
        struct nvkm_event *event = notify->event;
        int ret = notify->func(notify);
        unsigned long flags;
        if ((ret == NVKM_NOTIFY_KEEP) ||
            !test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
                spin_lock_irqsave(&event->refs_lock, flags);
                nvkm_notify_get_locked(notify);
                spin_unlock_irqrestore(&event->refs_lock, flags);
        }
}
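/*
 * Work handler for notifiers created with work == true: the callback
 * runs in process context on the payload copied by nvkm_notify_send().
 */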
static void
nvkm_notify_work(struct work_struct *work)
{
        struct nvkm_notify *notify = container_of(work, typeof(*notify), work);
        nvkm_notify_func(notify);
}
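/*
 * Deliver an event to a notifier.  Called by the event source with
 * event->list_lock held (asserted below).  Delivery is suppressed while
 * the notifier is blocked; otherwise the notifier is blocked until it is
 * re-armed, and the callback either runs inline on the caller's data or
 * is deferred to the work item on a private copy of the payload.
 */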
void
nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
{
        struct nvkm_event *event = notify->event;
        unsigned long flags;

        assert_spin_locked(&event->list_lock);
        BUG_ON(size != notify->size);

        spin_lock_irqsave(&event->refs_lock, flags);
        if (notify->block) {
                spin_unlock_irqrestore(&event->refs_lock, flags);
                return;
        }
        nvkm_notify_put_locked(notify);
        spin_unlock_irqrestore(&event->refs_lock, flags);

        if (test_bit(NVKM_NOTIFY_WORK, &notify->flags)) {
                memcpy((void *)notify->data, data, size);
                schedule_work(&notify->work);
        } else {
                notify->data = data;
                nvkm_notify_func(notify);
                notify->data = NULL;
        }
}
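/*
 * Tear down a notifier: disable it, remove it from the event's notifier
 * list and free any reply buffer allocated by nvkm_notify_init().  Does
 * nothing if the notifier was never successfully initialised.
 */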
void
nvkm_notify_fini(struct nvkm_notify *notify)
{
        unsigned long flags;
        if (notify->event) {
                nvkm_notify_put(notify);
                spin_lock_irqsave(&notify->event->list_lock, flags);
                list_del(&notify->head);
                spin_unlock_irqrestore(&notify->event->list_lock, flags);
                kfree((void *)notify->data);
                notify->event = NULL;
        }
}
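/*
 * Attach a notifier to an event.  The event's constructor translates the
 * request in 'data' into the types/index the notifier listens for and the
 * size of the reply it produces; -EINVAL is returned if that size does
 * not match 'reply'.  With work == true a reply buffer and work item are
 * set up so the callback can run in process context.  The notifier starts
 * out blocked (block == 1) and must be enabled with nvkm_notify_get().
 * -ENODEV is returned if event->refs has not been set up, i.e. the event
 * is not initialised.
 */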
int
nvkm_notify_init(struct nouveau_object *object, struct nvkm_event *event,
                 int (*func)(struct nvkm_notify *), bool work,
                 void *data, u32 size, u32 reply,
                 struct nvkm_notify *notify)
{
        unsigned long flags;
        int ret = -ENODEV;
        if ((notify->event = event), event->refs) {
                ret = event->func->ctor(object, data, size, notify);
                if (ret == 0 && (ret = -EINVAL, notify->size == reply)) {
                        notify->flags = 0;
                        notify->block = 1;
                        notify->func = func;
                        notify->data = NULL;
                        if (ret = 0, work) {
                                INIT_WORK(&notify->work, nvkm_notify_work);
                                set_bit(NVKM_NOTIFY_WORK, &notify->flags);
                                notify->data = kmalloc(reply, GFP_KERNEL);
                                if (!notify->data)
                                        ret = -ENOMEM;
                        }
                }
                if (ret == 0) {
                        spin_lock_irqsave(&event->list_lock, flags);
                        list_add_tail(&notify->head, &event->list);
                        spin_unlock_irqrestore(&event->list_lock, flags);
                }
        }
        if (ret)
                notify->event = NULL;
        return ret;
}
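/*
 * Typical caller flow (illustrative sketch only, not part of this file;
 * the handler name, the request/reply variables and the NVKM_NOTIFY_DROP
 * return value are assumed from <core/notify.h> and the event's own nvif
 * definitions):
 *
 *      static int
 *      my_handler(struct nvkm_notify *notify)
 *      {
 *              // notify->data/notify->size hold the reply payload
 *              return NVKM_NOTIFY_KEEP;        // or NVKM_NOTIFY_DROP
 *      }
 *
 *      ret = nvkm_notify_init(object, &event, my_handler, false,
 *                             &req, sizeof(req), sizeof(rep), &notify);
 *      if (ret == 0) {
 *              nvkm_notify_get(&notify);       // enable delivery
 *              ...
 *              nvkm_notify_put(&notify);       // disable delivery
 *              nvkm_notify_fini(&notify);      // unlink and free
 *      }
 */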