drivers/staging/greybus/hd.c
/*
 * Greybus Host Device
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/slab.h>

#include "greybus.h"
#include "greybus_trace.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_release);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_del);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_in);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_message_submit);

static struct ida gb_hd_bus_id_map;

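/*
 * Forward an output request to the host driver, provided it implements
 * the output callback.
 */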
int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
                 bool async)
{
        if (!hd || !hd->driver || !hd->driver->output)
                return -EINVAL;
        return hd->driver->output(hd, req, size, cmd, async);
}
EXPORT_SYMBOL_GPL(gb_hd_output);

static ssize_t bus_id_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct gb_host_device *hd = to_gb_host_device(dev);

        return sprintf(buf, "%d\n", hd->bus_id);
}
static DEVICE_ATTR_RO(bus_id);

static struct attribute *bus_attrs[] = {
        &dev_attr_bus_id.attr,
        NULL
};
ATTRIBUTE_GROUPS(bus);

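/* Reserve a specific CPort id in the host device's CPort-id map. */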
int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id)
{
        struct ida *id_map = &hd->cport_id_map;
        int ret;

        ret = ida_simple_get(id_map, cport_id, cport_id + 1, GFP_KERNEL);
        if (ret < 0) {
                dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id);
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(gb_hd_cport_reserve);

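/* Release a CPort id previously reserved with gb_hd_cport_reserve(). */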
void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id)
{
        struct ida *id_map = &hd->cport_id_map;

        ida_simple_remove(id_map, cport_id);
}
EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved);

/* Locking: Caller guarantees serialisation */
int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
                                unsigned long flags)
{
        struct ida *id_map = &hd->cport_id_map;
        int ida_start, ida_end;

        if (hd->driver->cport_allocate)
                return hd->driver->cport_allocate(hd, cport_id, flags);

        if (cport_id < 0) {
                ida_start = 0;
                ida_end = hd->num_cports;
        } else if (cport_id < hd->num_cports) {
                ida_start = cport_id;
                ida_end = cport_id + 1;
        } else {
                dev_err(&hd->dev, "cport %d not available\n", cport_id);
                return -EINVAL;
        }

        return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
}

/* Locking: Caller guarantees serialisation */
void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id)
{
        if (hd->driver->cport_release) {
                hd->driver->cport_release(hd, cport_id);
                return;
        }

        ida_simple_remove(&hd->cport_id_map, cport_id);
}

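/*
 * Device release callback: drop the SVC reference and free the host
 * device once its last reference is gone.
 */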
static void gb_hd_release(struct device *dev)
{
        struct gb_host_device *hd = to_gb_host_device(dev);

        trace_gb_hd_release(hd);

        if (hd->svc)
                gb_svc_put(hd->svc);
        ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id);
        ida_destroy(&hd->cport_id_map);
        kfree(hd);
}

struct device_type greybus_hd_type = {
        .name           = "greybus_host_device",
        .release        = gb_hd_release,
};

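/*
 * Allocate and initialise a host device, assign it a bus id and create its
 * SVC. The caller holds the initial device reference, which is dropped with
 * gb_hd_put().
 *
 * A host-controller driver would typically use this roughly as follows
 * (sketch only; "my_hd_driver" and "pdev" are placeholders and error
 * handling is abbreviated):
 *
 *      hd = gb_hd_create(&my_hd_driver, &pdev->dev, buffer_size_max,
 *                        num_cports);
 *      if (IS_ERR(hd))
 *              return PTR_ERR(hd);
 *      ret = gb_hd_add(hd);
 *      if (ret) {
 *              gb_hd_put(hd);
 *              return ret;
 *      }
 *      ...
 *      gb_hd_del(hd);
 *      gb_hd_put(hd);
 */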
struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
                                        struct device *parent,
                                        size_t buffer_size_max,
                                        size_t num_cports)
{
        struct gb_host_device *hd;
        int ret;

        /*
         * Validate that the driver implements all of the mandatory
         * callbacks so that we don't have to check for them on every
         * call.
         */
        if ((!driver->message_send) || (!driver->message_cancel)) {
                dev_err(parent, "mandatory hd-callbacks missing\n");
                return ERR_PTR(-EINVAL);
        }

        if (buffer_size_max < GB_OPERATION_MESSAGE_SIZE_MIN) {
                dev_err(parent, "greybus host-device buffers too small\n");
                return ERR_PTR(-EINVAL);
        }

        if (num_cports == 0 || num_cports > CPORT_ID_MAX + 1) {
                dev_err(parent, "Invalid number of CPorts: %zu\n", num_cports);
                return ERR_PTR(-EINVAL);
        }

        /*
         * Make sure to never allocate messages larger than what the Greybus
         * protocol supports.
         */
        if (buffer_size_max > GB_OPERATION_MESSAGE_SIZE_MAX) {
                dev_warn(parent, "limiting buffer size to %u\n",
                         GB_OPERATION_MESSAGE_SIZE_MAX);
                buffer_size_max = GB_OPERATION_MESSAGE_SIZE_MAX;
        }

        hd = kzalloc(sizeof(*hd) + driver->hd_priv_size, GFP_KERNEL);
        if (!hd)
                return ERR_PTR(-ENOMEM);

        ret = ida_simple_get(&gb_hd_bus_id_map, 1, 0, GFP_KERNEL);
        if (ret < 0) {
                kfree(hd);
                return ERR_PTR(ret);
        }
        hd->bus_id = ret;

        hd->driver = driver;
        INIT_LIST_HEAD(&hd->modules);
        INIT_LIST_HEAD(&hd->connections);
        ida_init(&hd->cport_id_map);
        hd->buffer_size_max = buffer_size_max;
        hd->num_cports = num_cports;

        hd->dev.parent = parent;
        hd->dev.bus = &greybus_bus_type;
        hd->dev.type = &greybus_hd_type;
        hd->dev.groups = bus_groups;
        hd->dev.dma_mask = hd->dev.parent->dma_mask;
        device_initialize(&hd->dev);
        dev_set_name(&hd->dev, "greybus%d", hd->bus_id);

        trace_gb_hd_create(hd);

        hd->svc = gb_svc_create(hd);
        if (!hd->svc) {
                dev_err(&hd->dev, "failed to create svc\n");
                put_device(&hd->dev);
                return ERR_PTR(-ENOMEM);
        }

        return hd;
}
EXPORT_SYMBOL_GPL(gb_hd_create);

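/* Register the host device on the greybus bus and add its SVC. */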
int gb_hd_add(struct gb_host_device *hd)
{
        int ret;

        ret = device_add(&hd->dev);
        if (ret)
                return ret;

        ret = gb_svc_add(hd->svc);
        if (ret) {
                device_del(&hd->dev);
                return ret;
        }

        trace_gb_hd_add(hd);

        return 0;
}
EXPORT_SYMBOL_GPL(gb_hd_add);

void gb_hd_del(struct gb_host_device *hd)
{
        trace_gb_hd_del(hd);

        /*
         * Tear down the svc and flush any on-going hotplug processing before
         * removing the remaining interfaces.
         */
        gb_svc_del(hd->svc);

        device_del(&hd->dev);
}
EXPORT_SYMBOL_GPL(gb_hd_del);

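/* Tear down the SVC only; the host device itself is not deregistered. */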
void gb_hd_shutdown(struct gb_host_device *hd)
{
        gb_svc_del(hd->svc);
}
EXPORT_SYMBOL_GPL(gb_hd_shutdown);

void gb_hd_put(struct gb_host_device *hd)
{
        put_device(&hd->dev);
}
EXPORT_SYMBOL_GPL(gb_hd_put);

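/* Initialise the global bus-id map used to number host devices. */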
int __init gb_hd_init(void)
{
        ida_init(&gb_hd_bus_id_map);

        return 0;
}

void gb_hd_exit(void)
{
        ida_destroy(&gb_hd_bus_id_map);
}