greybus: svc: use EREMOTEIO for remote errors
[cascardo/linux.git] / drivers / staging / greybus / loopback.c
1 /*
2  * Loopback bridge driver for the Greybus loopback module.
3  *
4  * Copyright 2014 Google Inc.
5  * Copyright 2014 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/slab.h>
16 #include <linux/kthread.h>
17 #include <linux/delay.h>
18 #include <linux/random.h>
19 #include <linux/sizes.h>
20 #include <linux/cdev.h>
21 #include <linux/fs.h>
22 #include <linux/kfifo.h>
23 #include <linux/debugfs.h>
24 #include <linux/list_sort.h>
25 #include <linux/spinlock.h>
26 #include <linux/workqueue.h>
27 #include <linux/atomic.h>
28
29 #include <asm/div64.h>
30
31 #include "greybus.h"
32 #include "connection.h"
33
34 #define NSEC_PER_DAY 86400000000000ULL
35
/* Running min/max/sum/count statistics for one measured quantity. */
struct gb_loopback_stats {
	u32 min;	/* smallest sample seen */
	u32 max;	/* largest sample seen */
	u64 sum;	/* running total, used to derive the average */
	u32 count;	/* number of samples accumulated */
};
42
/* Driver-global state shared by all loopback connections. */
struct gb_loopback_device {
	struct dentry *root;	/* debugfs root directory */
	u32 count;
	size_t size_max;	/* largest payload size supported */

	/* We need to take a lock in atomic context */
	spinlock_t lock;	/* protects list and list_op_async */
	struct list_head list;		/* registered gb_loopback instances */
	struct list_head list_op_async;	/* in-flight async operations */
	wait_queue_head_t wq;
};

static struct gb_loopback_device gb_dev;
56
/*
 * Tracker for one in-flight asynchronous loopback operation.
 *
 * Lifetime is kref-managed: the initial reference is dropped by either the
 * completion callback or the timeout work (whichever claims the operation
 * first), and id lookups take a temporary reference of their own.
 */
struct gb_loopback_async_operation {
	struct gb_loopback *gb;		/* owning connection state */
	struct gb_operation *operation;	/* underlying greybus operation */
	struct timeval ts;		/* submission timestamp */
	struct timer_list timer;	/* per-operation timeout */
	struct list_head entry;		/* link in gb_dev.list_op_async */
	struct work_struct work;	/* timeout bottom half */
	struct kref kref;
	bool pending;			/* not yet completed or timed out */
	/* optional type-specific response check; non-zero return = error */
	int (*completion)(struct gb_loopback_async_operation *op_async);
};
68
/* Per-connection loopback state. */
struct gb_loopback {
	struct gb_connection *connection;

	struct dentry *file;			/* debugfs entry */
	struct kfifo kfifo_lat;			/* logged latency samples */
	struct kfifo kfifo_ts;			/* logged raw timestamps */
	struct mutex mutex;			/* serializes attr writes vs. completion paths */
	struct task_struct *task;
	struct list_head entry;			/* link in gb_dev.list */
	struct device *dev;
	wait_queue_head_t wq;
	wait_queue_head_t wq_completion;	/* woken when an async op is destroyed */
	atomic_t outstanding_operations;	/* in-flight async operation count */

	/* Per connection stats */
	struct timeval ts;
	struct gb_loopback_stats latency;
	struct gb_loopback_stats throughput;
	struct gb_loopback_stats requests_per_second;
	struct gb_loopback_stats apbridge_unipro_latency;
	struct gb_loopback_stats gbphy_firmware_latency;

	int type;		/* loopback message type to send (0 = none) */
	int async;		/* non-zero selects the asynchronous path */
	int id;
	u32 size;		/* transfer message payload size in bytes */
	u32 iteration_max;	/* 0 implies infinite iterations */
	u32 iteration_count;
	int us_wait;		/* delay between messages, clamped to GB_LOOPBACK_US_WAIT_MAX */
	u32 error;
	u32 requests_completed;
	u32 requests_timedout;
	u32 timeout;		/* async timeout, in usecs */
	u32 jiffy_timeout;	/* same timeout converted/clamped to jiffies */
	u32 timeout_min;
	u32 timeout_max;
	u32 outstanding_operations_max;
	u32 lbid;
	u64 elapsed_nsecs;	/* latency of the most recent operation */
	u32 apbridge_latency_ts;
	u32 gbphy_latency_ts;

	u32 send_count;
};
113
/* sysfs class under which loopback devices are created. */
static struct class loopback_class = {
	.name		= "gb_loopback",
	.owner		= THIS_MODULE,
};
/* Allocator for unique loopback device ids. */
static DEFINE_IDA(loopback_ida);
119
/* Min/max values in jiffies */
#define GB_LOOPBACK_TIMEOUT_MIN				1
#define GB_LOOPBACK_TIMEOUT_MAX				10000

/* Default depth of the latency/timestamp kfifos */
#define GB_LOOPBACK_FIFO_DEFAULT			8192

/* Read-only module parameter; fifo depth is fixed at module load time */
static unsigned kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
module_param(kfifo_depth, uint, 0444);

/* Maximum size of any one send data buffer we support */
#define MAX_PACKET_SIZE (PAGE_SIZE * 2)

/* Upper bound for the us_wait attribute (1 second) */
#define GB_LOOPBACK_US_WAIT_MAX				1000000
133
134 /* interface sysfs attributes */
/* interface sysfs attributes */
/*
 * Generate a read-only sysfs attribute exposing the u32 gb_loopback
 * member of the same name.
 */
#define gb_loopback_ro_attr(field)				\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%u\n", gb->field);			\
}									\
static DEVICE_ATTR_RO(field)
144
/*
 * Generate a read-only sysfs attribute <name>_<field> exposing one member
 * of the struct gb_loopback_stats gb-><name>, printed with the given
 * printf conversion character.
 */
#define gb_loopback_ro_stats_attr(name, field, type)		\
static ssize_t name##_##field##_show(struct device *dev,	\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	/* Report 0 for min and max if no transfer succeeded */		\
	if (!gb->requests_completed)					\
		return sprintf(buf, "0\n");				\
	return sprintf(buf, "%"#type"\n", gb->name.field);	\
}									\
static DEVICE_ATTR_RO(name##_##field)
157
/*
 * Generate a read-only sysfs attribute <name>_avg printing the mean of
 * gb-><name> as "<integer>.<6 fractional digits>".  Adding count/2000000
 * to the sum rounds the sixth fractional digit to nearest; do_div() is
 * used because this is 64-by-32-bit division.
 */
#define gb_loopback_ro_avg_attr(name)			\
static ssize_t name##_avg_show(struct device *dev,		\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback_stats *stats;				\
	struct gb_loopback *gb;						\
	u64 avg, rem;							\
	u32 count;							\
	gb = dev_get_drvdata(dev);			\
	stats = &gb->name;					\
	count = stats->count ? stats->count : 1;			\
	avg = stats->sum + count / 2000000; /* round closest */		\
	rem = do_div(avg, count);					\
	rem *= 1000000;							\
	do_div(rem, count);						\
	return sprintf(buf, "%llu.%06u\n", avg, (u32)rem);		\
}									\
static DEVICE_ATTR_RO(name##_avg)
177
/* Generate the min/max/avg attribute trio for one stats field. */
#define gb_loopback_stats_attrs(field)				\
	gb_loopback_ro_stats_attr(field, min, u);		\
	gb_loopback_ro_stats_attr(field, max, u);		\
	gb_loopback_ro_avg_attr(field)
182
/*
 * Generate a read-write sysfs attribute for a gb_loopback field.  The
 * store handler parses the value under gb->mutex and re-validates all
 * attributes via gb_loopback_check_attr().
 *
 * Fixed: the stale expansion called gb_loopback_check_attr(gb, bundle),
 * which neither matches the function's single-argument signature nor has
 * a "bundle" in scope here; any instantiation of this macro would have
 * failed to compile.  Now consistent with gb_dev_loopback_rw_attr().
 */
#define gb_loopback_attr(field, type)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%"#type"\n", gb->field);			\
}									\
static ssize_t field##_store(struct device *dev,			\
			    struct device_attribute *attr,		\
			    const char *buf,				\
			    size_t len)					\
{									\
	int ret;							\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	mutex_lock(&gb->mutex);						\
	ret = sscanf(buf, "%"#type, &gb->field);			\
	if (ret != 1)							\
		len = -EINVAL;						\
	else								\
		gb_loopback_check_attr(gb);				\
	mutex_unlock(&gb->mutex);					\
	return len;							\
}									\
static DEVICE_ATTR_RW(field)
208
/*
 * Generate a read-only sysfs attribute for a u32 gb_loopback field.
 * The conn argument is unused (the iteration_count instantiation below
 * passes "false").
 */
#define gb_dev_loopback_ro_attr(field, conn)				\
static ssize_t field##_show(struct device *dev,		\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%u\n", gb->field);				\
}									\
static DEVICE_ATTR_RO(field)
218
/*
 * Generate a read-write sysfs attribute for a gb_loopback field.  The
 * store handler parses the value under gb->mutex and then re-validates
 * every attribute (and restarts the test run) via
 * gb_loopback_check_attr().
 */
#define gb_dev_loopback_rw_attr(field, type)				\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%"#type"\n", gb->field);			\
}									\
static ssize_t field##_store(struct device *dev,			\
			    struct device_attribute *attr,		\
			    const char *buf,				\
			    size_t len)					\
{									\
	int ret;							\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	mutex_lock(&gb->mutex);						\
	ret = sscanf(buf, "%"#type, &gb->field);			\
	if (ret != 1)							\
		len = -EINVAL;						\
	else								\
		gb_loopback_check_attr(gb);		\
	mutex_unlock(&gb->mutex);					\
	return len;							\
}									\
static DEVICE_ATTR_RW(field)
244
245 static void gb_loopback_reset_stats(struct gb_loopback *gb);
246 static void gb_loopback_check_attr(struct gb_loopback *gb)
247 {
248         if (gb->us_wait > GB_LOOPBACK_US_WAIT_MAX)
249                 gb->us_wait = GB_LOOPBACK_US_WAIT_MAX;
250         if (gb->size > gb_dev.size_max)
251                 gb->size = gb_dev.size_max;
252         gb->requests_timedout = 0;
253         gb->requests_completed = 0;
254         gb->iteration_count = 0;
255         gb->send_count = 0;
256         gb->error = 0;
257
258         if (kfifo_depth < gb->iteration_max) {
259                 dev_warn(gb->dev,
260                          "cannot log bytes %u kfifo_depth %u\n",
261                          gb->iteration_max, kfifo_depth);
262         }
263         kfifo_reset_out(&gb->kfifo_lat);
264         kfifo_reset_out(&gb->kfifo_ts);
265
266         switch (gb->type) {
267         case GB_LOOPBACK_TYPE_PING:
268         case GB_LOOPBACK_TYPE_TRANSFER:
269         case GB_LOOPBACK_TYPE_SINK:
270                 gb->jiffy_timeout = usecs_to_jiffies(gb->timeout);
271                 if (!gb->jiffy_timeout)
272                         gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN;
273                 else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX)
274                         gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX;
275                 gb_loopback_reset_stats(gb);
276                 wake_up(&gb->wq);
277                 break;
278         default:
279                 gb->type = 0;
280                 break;
281         }
282 }
283
/* Time to send and receive one message */
gb_loopback_stats_attrs(latency);
/* Number of requests sent per second on this cport */
gb_loopback_stats_attrs(requests_per_second);
/* Quantity of data sent and received on this cport */
gb_loopback_stats_attrs(throughput);
/* Latency across the UniPro link from APBridge's perspective */
gb_loopback_stats_attrs(apbridge_unipro_latency);
/* Firmware induced overhead in the GPBridge */
gb_loopback_stats_attrs(gbphy_firmware_latency);

/* Number of errors encountered during loop */
gb_loopback_ro_attr(error);
/* Number of requests successfully completed async */
gb_loopback_ro_attr(requests_completed);
/* Number of requests timed out async */
gb_loopback_ro_attr(requests_timedout);
/* Timeout minimum in useconds */
gb_loopback_ro_attr(timeout_min);
/* Timeout maximum in useconds */
gb_loopback_ro_attr(timeout_max);

/*
 * Type of loopback message to send based on protocol type definitions
 * 0 => Don't send message
 * 2 => Send ping message continuously (message without payload)
 * 3 => Send transfer message continuously (message with payload,
 *					   payload returned in response)
 * 4 => Send a sink message (message with payload, no payload in response)
 */
gb_dev_loopback_rw_attr(type, d);
/* Size of transfer message payload: 0-4096 bytes */
gb_dev_loopback_rw_attr(size, u);
/* Time to wait between two messages: 0-1000 ms */
gb_dev_loopback_rw_attr(us_wait, d);
/* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
gb_dev_loopback_rw_attr(iteration_max, u);
/* The current index of the for (i = 0; i < iteration_max; i++) loop */
gb_dev_loopback_ro_attr(iteration_count, false);
/* A flag to indicate synchronous or asynchronous operations */
gb_dev_loopback_rw_attr(async, u);
/* Timeout of an individual asynchronous request */
gb_dev_loopback_rw_attr(timeout, u);
/* Maximum number of in-flight operations before back-off */
gb_dev_loopback_rw_attr(outstanding_operations_max, u);
329
/* All sysfs attributes exported for a loopback connection device. */
static struct attribute *loopback_attrs[] = {
	&dev_attr_latency_min.attr,
	&dev_attr_latency_max.attr,
	&dev_attr_latency_avg.attr,
	&dev_attr_requests_per_second_min.attr,
	&dev_attr_requests_per_second_max.attr,
	&dev_attr_requests_per_second_avg.attr,
	&dev_attr_throughput_min.attr,
	&dev_attr_throughput_max.attr,
	&dev_attr_throughput_avg.attr,
	&dev_attr_apbridge_unipro_latency_min.attr,
	&dev_attr_apbridge_unipro_latency_max.attr,
	&dev_attr_apbridge_unipro_latency_avg.attr,
	&dev_attr_gbphy_firmware_latency_min.attr,
	&dev_attr_gbphy_firmware_latency_max.attr,
	&dev_attr_gbphy_firmware_latency_avg.attr,
	&dev_attr_type.attr,
	&dev_attr_size.attr,
	&dev_attr_us_wait.attr,
	&dev_attr_iteration_count.attr,
	&dev_attr_iteration_max.attr,
	&dev_attr_async.attr,
	&dev_attr_error.attr,
	&dev_attr_requests_completed.attr,
	&dev_attr_requests_timedout.attr,
	&dev_attr_timeout.attr,
	&dev_attr_outstanding_operations_max.attr,
	&dev_attr_timeout_min.attr,
	&dev_attr_timeout_max.attr,
	NULL,
};
ATTRIBUTE_GROUPS(loopback);

static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);
364
365 static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
366 {
367         u32 lat;
368
369         do_div(elapsed_nsecs, NSEC_PER_USEC);
370         lat = elapsed_nsecs;
371         return lat;
372 }
373
374 static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
375 {
376         if (t2 > t1)
377                 return t2 - t1;
378         else
379                 return NSEC_PER_DAY - t2 + t1;
380 }
381
382 static u64 gb_loopback_calc_latency(struct timeval *ts, struct timeval *te)
383 {
384         u64 t1, t2;
385
386         t1 = timeval_to_ns(ts);
387         t2 = timeval_to_ns(te);
388
389         return __gb_loopback_calc_latency(t1, t2);
390 }
391
392 static void gb_loopback_push_latency_ts(struct gb_loopback *gb,
393                                         struct timeval *ts, struct timeval *te)
394 {
395         kfifo_in(&gb->kfifo_ts, (unsigned char *)ts, sizeof(*ts));
396         kfifo_in(&gb->kfifo_ts, (unsigned char *)te, sizeof(*te));
397 }
398
399 static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
400                                       void *request, int request_size,
401                                       void *response, int response_size)
402 {
403         struct gb_operation *operation;
404         struct timeval ts, te;
405         int ret;
406
407         do_gettimeofday(&ts);
408         operation = gb_operation_create(gb->connection, type, request_size,
409                                         response_size, GFP_KERNEL);
410         if (!operation)
411                 return -ENOMEM;
412
413         if (request_size)
414                 memcpy(operation->request->payload, request, request_size);
415
416         ret = gb_operation_request_send_sync(operation);
417         if (ret) {
418                 dev_err(&gb->connection->bundle->dev,
419                         "synchronous operation failed: %d\n", ret);
420                 goto out_put_operation;
421         } else {
422                 if (response_size == operation->response->payload_size) {
423                         memcpy(response, operation->response->payload,
424                                response_size);
425                 } else {
426                         dev_err(&gb->connection->bundle->dev,
427                                 "response size %zu expected %d\n",
428                                 operation->response->payload_size,
429                                 response_size);
430                         ret = -EINVAL;
431                         goto out_put_operation;
432                 }
433         }
434
435         do_gettimeofday(&te);
436
437         /* Calculate the total time the message took */
438         gb_loopback_push_latency_ts(gb, &ts, &te);
439         gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);
440
441 out_put_operation:
442         gb_operation_put(operation);
443
444         return ret;
445 }
446
/*
 * Final kref release for an async operation tracker.
 *
 * Invoked via kref_put() with gb_dev.lock held (see
 * gb_loopback_async_operation_put()), which protects the list_del().
 * Drops the greybus operation reference, wakes any waiter in
 * gb_loopback_async_wait_all(), and frees the tracker.
 */
static void __gb_loopback_async_operation_destroy(struct kref *kref)
{
	struct gb_loopback_async_operation *op_async;

	op_async = container_of(kref, struct gb_loopback_async_operation, kref);

	list_del(&op_async->entry);
	if (op_async->operation)
		gb_operation_put(op_async->operation);
	atomic_dec(&op_async->gb->outstanding_operations);
	wake_up(&op_async->gb->wq_completion);
	kfree(op_async);
}
460
/* Take an additional reference on an async operation tracker. */
static void gb_loopback_async_operation_get(struct gb_loopback_async_operation
					    *op_async)
{
	kref_get(&op_async->kref);
}
466
/*
 * Drop a reference on an async operation tracker.  gb_dev.lock is held
 * across the kref_put() so that the final release
 * (__gb_loopback_async_operation_destroy) can safely unlink the tracker
 * from gb_dev.list_op_async.
 */
static void gb_loopback_async_operation_put(struct gb_loopback_async_operation
					    *op_async)
{
	unsigned long flags;

	spin_lock_irqsave(&gb_dev.lock, flags);
	kref_put(&op_async->kref, __gb_loopback_async_operation_destroy);
	spin_unlock_irqrestore(&gb_dev.lock, flags);
}
476
477 static struct gb_loopback_async_operation *
478         gb_loopback_operation_find(u16 id)
479 {
480         struct gb_loopback_async_operation *op_async;
481         bool found = false;
482         unsigned long flags;
483
484         spin_lock_irqsave(&gb_dev.lock, flags);
485         list_for_each_entry(op_async, &gb_dev.list_op_async, entry) {
486                 if (op_async->operation->id == id) {
487                         gb_loopback_async_operation_get(op_async);
488                         found = true;
489                         break;
490                 }
491         }
492         spin_unlock_irqrestore(&gb_dev.lock, flags);
493
494         return found ? op_async : NULL;
495 }
496
/* Block until every outstanding async operation on @gb has been freed. */
static void gb_loopback_async_wait_all(struct gb_loopback *gb)
{
	wait_event(gb->wq_completion,
		   !atomic_read(&gb->outstanding_operations));
}
502
/*
 * Completion callback for asynchronous loopback operations.
 *
 * Runs when the response arrives (or the core reports an error).  Matches
 * the greybus operation back to its tracker by id, runs the optional
 * type-specific completion check, updates latency/error accounting, and
 * drops the tracker's initial reference — but only if the timeout path
 * has not already claimed the operation (op_async->pending cleared).
 */
static void gb_loopback_async_operation_callback(struct gb_operation *operation)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_loopback *gb;
	struct timeval te;
	bool err = false;

	/* Timestamp first so lookup overhead is not counted as latency. */
	do_gettimeofday(&te);
	op_async = gb_loopback_operation_find(operation->id);
	if (!op_async)
		return;

	gb = op_async->gb;
	mutex_lock(&gb->mutex);

	/* !pending means the timeout work already accounted this one. */
	if (!op_async->pending || gb_operation_result(operation)) {
		err = true;
	} else {
		if (op_async->completion)
			if (op_async->completion(op_async))
				err = true;
	}

	if (!err) {
		gb_loopback_push_latency_ts(gb, &op_async->ts, &te);
		gb->elapsed_nsecs = gb_loopback_calc_latency(&op_async->ts,
							     &te);
	}

	if (op_async->pending) {
		if (err)
			gb->error++;
		gb->iteration_count++;
		op_async->pending = false;
		del_timer_sync(&op_async->timer);
		/* Drop the initial reference held since submission. */
		gb_loopback_async_operation_put(op_async);
		gb_loopback_calculate_stats(gb, err);
	}
	mutex_unlock(&gb->mutex);

	dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
		operation->id);

	/* Pairs with the reference taken by gb_loopback_operation_find(). */
	gb_loopback_async_operation_put(op_async);
}
548
/*
 * Process-context handler for an async operation timeout.
 *
 * If the completion callback has not already claimed the operation
 * (op_async->pending still set under gb->mutex), account it as a
 * timed-out error and drop the tracker's initial reference, then cancel
 * the underlying greybus operation.
 */
static void gb_loopback_async_operation_work(struct work_struct *work)
{
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct gb_loopback_async_operation *op_async;

	op_async = container_of(work, struct gb_loopback_async_operation, work);
	gb = op_async->gb;
	operation = op_async->operation;

	mutex_lock(&gb->mutex);
	if (op_async->pending) {
		gb->requests_timedout++;
		gb->error++;
		gb->iteration_count++;
		op_async->pending = false;
		/* Drop the initial reference held since submission. */
		gb_loopback_async_operation_put(op_async);
		gb_loopback_calculate_stats(gb, true);
	}
	mutex_unlock(&gb->mutex);

	dev_dbg(&gb->connection->bundle->dev, "timeout operation %d\n",
		operation->id);

	gb_operation_cancel(operation, -ETIMEDOUT);
	/* Pairs with the reference taken in the timer handler's find(). */
	gb_loopback_async_operation_put(op_async);
}
576
577 static void gb_loopback_async_operation_timeout(unsigned long data)
578 {
579         struct gb_loopback_async_operation *op_async;
580         u16 id = data;
581
582         op_async = gb_loopback_operation_find(id);
583         if (!op_async) {
584                 pr_err("operation %d not found - time out ?\n", id);
585                 return;
586         }
587         schedule_work(&op_async->work);
588 }
589
/*
 * Submit one asynchronous loopback operation of @type.
 *
 * Allocates a kref-counted tracker, creates and sends the greybus
 * operation, and arms a per-operation timeout timer.  @completion (may be
 * NULL) is invoked from the response callback to validate the reply.
 * gb->mutex is held across the send and the timer arming so the
 * completion callback (which also takes gb->mutex) cannot race with the
 * timer setup.
 *
 * Returns 0 on success or a negative errno.
 */
static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
				       void *request, int request_size,
				       int response_size,
				       void *completion)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_operation *operation;
	int ret;
	unsigned long flags;

	op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
	if (!op_async)
		return -ENOMEM;

	INIT_WORK(&op_async->work, gb_loopback_async_operation_work);
	init_timer(&op_async->timer);
	kref_init(&op_async->kref);

	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation) {
		kfree(op_async);
		return -ENOMEM;
	}

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	op_async->gb = gb;
	op_async->operation = operation;
	op_async->completion = completion;

	/* Make the operation findable by id before it can complete. */
	spin_lock_irqsave(&gb_dev.lock, flags);
	list_add_tail(&op_async->entry, &gb_dev.list_op_async);
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	do_gettimeofday(&op_async->ts);
	op_async->pending = true;
	atomic_inc(&gb->outstanding_operations);
	mutex_lock(&gb->mutex);
	ret = gb_operation_request_send(operation,
					gb_loopback_async_operation_callback,
					GFP_KERNEL);
	if (ret)
		goto error;

	/* The timer identifies the operation by id, not by pointer. */
	op_async->timer.function = gb_loopback_async_operation_timeout;
	op_async->timer.expires = jiffies + gb->jiffy_timeout;
	op_async->timer.data = (unsigned long)operation->id;
	add_timer(&op_async->timer);

	goto done;
error:
	gb_loopback_async_operation_put(op_async);
done:
	mutex_unlock(&gb->mutex);
	return ret;
}
648
649 static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
650 {
651         struct gb_loopback_transfer_request *request;
652         int retval;
653
654         request = kmalloc(len + sizeof(*request), GFP_KERNEL);
655         if (!request)
656                 return -ENOMEM;
657
658         request->len = cpu_to_le32(len);
659         retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
660                                             request, len + sizeof(*request),
661                                             NULL, 0);
662         kfree(request);
663         return retval;
664 }
665
666 static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
667 {
668         struct gb_loopback_transfer_request *request;
669         struct gb_loopback_transfer_response *response;
670         int retval;
671
672         gb->apbridge_latency_ts = 0;
673         gb->gbphy_latency_ts = 0;
674
675         request = kmalloc(len + sizeof(*request), GFP_KERNEL);
676         if (!request)
677                 return -ENOMEM;
678         response = kmalloc(len + sizeof(*response), GFP_KERNEL);
679         if (!response) {
680                 kfree(request);
681                 return -ENOMEM;
682         }
683
684         memset(request->data, 0x5A, len);
685
686         request->len = cpu_to_le32(len);
687         retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
688                                             request, len + sizeof(*request),
689                                             response, len + sizeof(*response));
690         if (retval)
691                 goto gb_error;
692
693         if (memcmp(request->data, response->data, len)) {
694                 dev_err(&gb->connection->bundle->dev,
695                         "Loopback Data doesn't match\n");
696                 retval = -EREMOTEIO;
697         }
698         gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
699         gb->gbphy_latency_ts = (u32)__le32_to_cpu(response->reserved1);
700
701 gb_error:
702         kfree(request);
703         kfree(response);
704
705         return retval;
706 }
707
/* Send one synchronous ping: no request payload, no response payload. */
static int gb_loopback_sync_ping(struct gb_loopback *gb)
{
	return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
					  NULL, 0, NULL, 0);
}
713
714 static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
715 {
716         struct gb_loopback_transfer_request *request;
717         int retval;
718
719         request = kmalloc(len + sizeof(*request), GFP_KERNEL);
720         if (!request)
721                 return -ENOMEM;
722
723         request->len = cpu_to_le32(len);
724         retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
725                                              request, len + sizeof(*request),
726                                              0, NULL);
727         kfree(request);
728         return retval;
729 }
730
731 static int gb_loopback_async_transfer_complete(
732                                 struct gb_loopback_async_operation *op_async)
733 {
734         struct gb_loopback *gb;
735         struct gb_operation *operation;
736         struct gb_loopback_transfer_request *request;
737         struct gb_loopback_transfer_response *response;
738         size_t len;
739         int retval = 0;
740
741         gb = op_async->gb;
742         operation = op_async->operation;
743         request = operation->request->payload;
744         response = operation->response->payload;
745         len = le32_to_cpu(request->len);
746
747         if (memcmp(request->data, response->data, len)) {
748                 dev_err(&gb->connection->bundle->dev,
749                         "Loopback Data doesn't match operation id %d\n",
750                         operation->id);
751                 retval = -EREMOTEIO;
752         } else {
753                 gb->apbridge_latency_ts =
754                         (u32)__le32_to_cpu(response->reserved0);
755                 gb->gbphy_latency_ts =
756                         (u32)__le32_to_cpu(response->reserved1);
757         }
758
759         return retval;
760 }
761
762 static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
763 {
764         struct gb_loopback_transfer_request *request;
765         int retval, response_len;
766
767         request = kmalloc(len + sizeof(*request), GFP_KERNEL);
768         if (!request)
769                 return -ENOMEM;
770
771         memset(request->data, 0x5A, len);
772
773         request->len = cpu_to_le32(len);
774         response_len = sizeof(struct gb_loopback_transfer_response);
775         retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
776                                              request, len + sizeof(*request),
777                                              len + response_len,
778                                              gb_loopback_async_transfer_complete);
779         if (retval)
780                 goto gb_error;
781
782 gb_error:
783         kfree(request);
784         return retval;
785 }
786
/*
 * Submit an asynchronous ping operation: no request payload, no response
 * payload and no completion callback.
 */
static int gb_loopback_async_ping(struct gb_loopback *gb)
{
	return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
					   NULL, 0, 0, NULL);
}
792
793 static int gb_loopback_request_handler(struct gb_operation *operation)
794 {
795         struct gb_connection *connection = operation->connection;
796         struct gb_loopback_transfer_request *request;
797         struct gb_loopback_transfer_response *response;
798         struct device *dev = &connection->bundle->dev;
799         size_t len;
800
801         /* By convention, the AP initiates the version operation */
802         switch (operation->type) {
803         case GB_LOOPBACK_TYPE_PING:
804         case GB_LOOPBACK_TYPE_SINK:
805                 return 0;
806         case GB_LOOPBACK_TYPE_TRANSFER:
807                 if (operation->request->payload_size < sizeof(*request)) {
808                         dev_err(dev, "transfer request too small (%zu < %zu)\n",
809                                 operation->request->payload_size,
810                                 sizeof(*request));
811                         return -EINVAL; /* -EMSGSIZE */
812                 }
813                 request = operation->request->payload;
814                 len = le32_to_cpu(request->len);
815                 if (len > gb_dev.size_max) {
816                         dev_err(dev, "transfer request too large (%zu > %zu)\n",
817                                 len, gb_dev.size_max);
818                         return -EINVAL;
819                 }
820
821                 if (!gb_operation_response_alloc(operation,
822                                 len + sizeof(*response), GFP_KERNEL)) {
823                         dev_err(dev, "error allocating response\n");
824                         return -ENOMEM;
825                 }
826                 response = operation->response->payload;
827                 response->len = cpu_to_le32(len);
828                 if (len)
829                         memcpy(response->data, request->data, len);
830
831                 return 0;
832         default:
833                 dev_err(dev, "unsupported request: %u\n", operation->type);
834                 return -EINVAL;
835         }
836 }
837
838 static void gb_loopback_reset_stats(struct gb_loopback *gb)
839 {
840         struct gb_loopback_stats reset = {
841                 .min = U32_MAX,
842         };
843
844         /* Reset per-connection stats */
845         memcpy(&gb->latency, &reset,
846                sizeof(struct gb_loopback_stats));
847         memcpy(&gb->throughput, &reset,
848                sizeof(struct gb_loopback_stats));
849         memcpy(&gb->requests_per_second, &reset,
850                sizeof(struct gb_loopback_stats));
851         memcpy(&gb->apbridge_unipro_latency, &reset,
852                sizeof(struct gb_loopback_stats));
853         memcpy(&gb->gbphy_firmware_latency, &reset,
854                sizeof(struct gb_loopback_stats));
855
856         /* Should be initialized at least once per transaction set */
857         gb->apbridge_latency_ts = 0;
858         gb->gbphy_latency_ts = 0;
859         memset(&gb->ts, 0, sizeof(struct timeval));
860 }
861
862 static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
863 {
864         if (stats->min > val)
865                 stats->min = val;
866         if (stats->max < val)
867                 stats->max = val;
868         stats->sum += val;
869         stats->count++;
870 }
871
/*
 * Fold a whole measurement window into a stats accumulator.
 *
 * @val is the window's aggregate value and @count the number of samples
 * it covers.  The undivided aggregate goes into sum/count; min/max are
 * updated from the per-sample average.  Note do_div() divides @val in
 * place, which is why sum is accumulated first.
 *
 * NOTE(review): do_div() with count == 0 would divide by zero — callers
 * appear to pass a nonzero latency; verify at the call sites.
 */
static void gb_loopback_update_stats_window(struct gb_loopback_stats *stats,
					    u64 val, u32 count)
{
	stats->sum += val;
	stats->count += count;

	do_div(val, count);
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
}
884
/*
 * Update the requests-per-second stats for the current window.
 *
 * @latency is the window duration in microseconds; scaling the completed
 * request count by USEC_PER_SEC before dividing by it (inside the window
 * helper) yields requests per second.
 */
static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
{
	u64 req = gb->requests_completed * USEC_PER_SEC;

	gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
}
891
892 static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
893 {
894         u64 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;
895
896         switch (gb->type) {
897         case GB_LOOPBACK_TYPE_PING:
898                 break;
899         case GB_LOOPBACK_TYPE_SINK:
900                 aggregate_size += sizeof(struct gb_loopback_transfer_request) +
901                                   gb->size;
902                 break;
903         case GB_LOOPBACK_TYPE_TRANSFER:
904                 aggregate_size += sizeof(struct gb_loopback_transfer_request) +
905                                   sizeof(struct gb_loopback_transfer_response) +
906                                   gb->size * 2;
907                 break;
908         default:
909                 return;
910         }
911
912         aggregate_size *= gb->requests_completed;
913         aggregate_size *= USEC_PER_SEC;
914         gb_loopback_update_stats_window(&gb->throughput, aggregate_size,
915                                         latency);
916 }
917
/*
 * Record latency statistics for one completed operation: fold the
 * measured round-trip latency into the aggregate stats, push the raw
 * sample into the per-connection kfifo for debugfs readers, and log the
 * firmware-reported latency values cached by the completion path.
 */
static void gb_loopback_calculate_latency_stats(struct gb_loopback *gb)
{
	u32 lat;

	/* Express latency in terms of microseconds */
	lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);

	/* Log latency stastic */
	gb_loopback_update_stats(&gb->latency, lat);

	/* Raw latency log on a per thread basis */
	kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));

	/* Log the firmware supplied latency values */
	gb_loopback_update_stats(&gb->apbridge_unipro_latency,
				 gb->apbridge_latency_ts);
	gb_loopback_update_stats(&gb->gbphy_firmware_latency,
				 gb->gbphy_latency_ts);
}
937
/*
 * Update statistics after one operation completes.
 *
 * Successful operations bump the completed count and feed the latency
 * stats.  Window-based stats (throughput, requests/sec) are flushed once
 * at least a second has elapsed since the window started, or when the
 * final iteration completes; except on that final iteration the window
 * is then restarted from the current time.
 */
static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
{
	u64 nlat;
	u32 lat;
	struct timeval te;

	if (!error) {
		gb->requests_completed++;
		gb_loopback_calculate_latency_stats(gb);
	}

	do_gettimeofday(&te);
	nlat = gb_loopback_calc_latency(&gb->ts, &te);
	if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
		lat = gb_loopback_nsec_to_usec_latency(nlat);

		gb_loopback_throughput_update(gb, lat);
		gb_loopback_requests_update(gb, lat);

		/* Restart the window unless this was the last iteration */
		if (gb->iteration_count != gb->iteration_max) {
			gb->ts = te;
			gb->requests_completed = 0;
		}
	}
}
963
964 static void gb_loopback_async_wait_to_send(struct gb_loopback *gb)
965 {
966         if (!(gb->async && gb->outstanding_operations_max))
967                 return;
968         wait_event_interruptible(gb->wq_completion,
969                                  (atomic_read(&gb->outstanding_operations) <
970                                   gb->outstanding_operations_max) ||
971                                   kthread_should_stop());
972 }
973
/*
 * Main worker-thread loop for a loopback connection.
 *
 * Sleeps until user space selects a test type (gb->type) or the thread
 * is asked to stop.  Each pass snapshots the test parameters under
 * gb->mutex, issues one operation — async or sync depending on
 * gb->async — and then optionally busy-waits before the next send.
 * For sync operations, stats are updated inline; for async operations
 * the completion path is responsible for stats and iteration counting.
 */
static int gb_loopback_fn(void *data)
{
	int error = 0;
	int us_wait = 0;
	int type;
	u32 size;

	struct gb_loopback *gb = data;

	while (1) {
		/* Idle until a test type is set or we are told to stop */
		if (!gb->type)
			wait_event_interruptible(gb->wq, gb->type ||
						 kthread_should_stop());
		if (kthread_should_stop())
			break;

		/* Limit the maximum number of in-flight async operations */
		gb_loopback_async_wait_to_send(gb);
		if (kthread_should_stop())
			break;

		mutex_lock(&gb->mutex);

		/* Optionally terminate */
		if (gb->send_count == gb->iteration_max) {
			if (gb->iteration_count == gb->iteration_max) {
				/* Test run complete: notify user space */
				gb->type = 0;
				gb->send_count = 0;
				sysfs_notify(&gb->dev->kobj,  NULL,
						"iteration_count");
			}
			mutex_unlock(&gb->mutex);
			continue;
		}
		/* Snapshot parameters so they stay stable for this send */
		size = gb->size;
		us_wait = gb->us_wait;
		type = gb->type;
		/* First send of a run: start the measurement window */
		if (gb->ts.tv_usec == 0 && gb->ts.tv_sec == 0)
			do_gettimeofday(&gb->ts);
		mutex_unlock(&gb->mutex);

		/* Else operations to perform */
		if (gb->async) {
			if (type == GB_LOOPBACK_TYPE_PING) {
				error = gb_loopback_async_ping(gb);
			} else if (type == GB_LOOPBACK_TYPE_TRANSFER) {
				error = gb_loopback_async_transfer(gb, size);
			} else if (type == GB_LOOPBACK_TYPE_SINK) {
				error = gb_loopback_async_sink(gb, size);
			}

			if (error)
				gb->error++;
		} else {
			/* We are effectively single threaded here */
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_sync_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_sync_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_sync_sink(gb, size);

			if (error)
				gb->error++;
			gb->iteration_count++;
			gb_loopback_calculate_stats(gb, !!error);
		}
		gb->send_count++;
		if (us_wait)
			udelay(us_wait);
	}
	return 0;
}
1047
1048 static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
1049                                                  struct kfifo *kfifo,
1050                                                  struct mutex *mutex)
1051 {
1052         u32 latency;
1053         int retval;
1054
1055         if (kfifo_len(kfifo) == 0) {
1056                 retval = -EAGAIN;
1057                 goto done;
1058         }
1059
1060         mutex_lock(mutex);
1061         retval = kfifo_out(kfifo, &latency, sizeof(latency));
1062         if (retval > 0) {
1063                 seq_printf(s, "%u", latency);
1064                 retval = 0;
1065         }
1066         mutex_unlock(mutex);
1067 done:
1068         return retval;
1069 }
1070
/* debugfs show callback: print one raw latency sample for this connection. */
static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
{
	struct gb_loopback *gb = s->private;

	return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
						     &gb->mutex);
}
1078
/* debugfs open: bind the per-connection context to the seq_file. */
static int gb_loopback_latency_open(struct inode *inode, struct file *file)
{
	return single_open(file, gb_loopback_dbgfs_latency_show,
			   inode->i_private);
}
1084
/* File operations for the per-connection raw_latency_* debugfs file. */
static const struct file_operations gb_loopback_debugfs_latency_ops = {
	.open		= gb_loopback_latency_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1091
1092 static int gb_loopback_bus_id_compare(void *priv, struct list_head *lha,
1093                                       struct list_head *lhb)
1094 {
1095         struct gb_loopback *a = list_entry(lha, struct gb_loopback, entry);
1096         struct gb_loopback *b = list_entry(lhb, struct gb_loopback, entry);
1097         struct gb_connection *ca = a->connection;
1098         struct gb_connection *cb = b->connection;
1099
1100         if (ca->bundle->intf->interface_id < cb->bundle->intf->interface_id)
1101                 return -1;
1102         if (cb->bundle->intf->interface_id < ca->bundle->intf->interface_id)
1103                 return 1;
1104         if (ca->bundle->id < cb->bundle->id)
1105                 return -1;
1106         if (cb->bundle->id < ca->bundle->id)
1107                 return 1;
1108         if (ca->intf_cport_id < cb->intf_cport_id)
1109                 return -1;
1110         else if (cb->intf_cport_id < ca->intf_cport_id)
1111                 return 1;
1112
1113         return 0;
1114 }
1115
/*
 * Add @gb to the global device list and reassign every connection a
 * single-bit id (lbid) reflecting its sorted bus position, so lbids can
 * be combined as a mask.
 *
 * NOTE(review): lbid is built as 1 << n, so this breaks (undefined
 * behaviour on the shift) beyond 31 connections — confirm the device
 * count is bounded elsewhere.  Caller holds gb_dev.lock.
 */
static void gb_loopback_insert_id(struct gb_loopback *gb)
{
	struct gb_loopback *gb_list;
	u32 new_lbid = 0;

	/* perform an insertion sort */
	list_add_tail(&gb->entry, &gb_dev.list);
	list_sort(NULL, &gb_dev.list, gb_loopback_bus_id_compare);
	list_for_each_entry(gb_list, &gb_dev.list, entry) {
		gb_list->lbid = 1 << new_lbid;
		new_lbid++;
	}
}
1129
1130 #define DEBUGFS_NAMELEN 32
1131
/*
 * Bundle probe: set up one loopback connection.
 *
 * Validates the bundle (exactly one loopback CPort), creates and enables
 * the connection, computes the maximum transfer payload on first probe,
 * creates the per-connection debugfs file, sysfs class device and kfifos,
 * and finally starts the worker thread.  Error unwinding mirrors the
 * setup order via the goto-label chain at the bottom.
 *
 * Returns 0 on success or a negative errno.
 */
static int gb_loopback_probe(struct gb_bundle *bundle,
			     const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_loopback *gb;
	struct device *dev;
	int retval;
	char name[DEBUGFS_NAMELEN];
	unsigned long flags;

	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOOPBACK)
		return -ENODEV;

	gb = kzalloc(sizeof(*gb), GFP_KERNEL);
	if (!gb)
		return -ENOMEM;

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gb_loopback_request_handler);
	if (IS_ERR(connection)) {
		retval = PTR_ERR(connection);
		goto out_kzalloc;
	}

	gb->connection = connection;
	greybus_set_drvdata(bundle, gb);

	init_waitqueue_head(&gb->wq);
	init_waitqueue_head(&gb->wq_completion);
	atomic_set(&gb->outstanding_operations, 0);
	gb_loopback_reset_stats(gb);

	/* Reported values to user-space for min/max timeouts */
	gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
	gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);

	/* First connection: derive the maximum transfer payload size */
	if (!gb_dev.count) {
		/* Calculate maximum payload */
		gb_dev.size_max = gb_operation_get_payload_size_max(connection);
		if (gb_dev.size_max <=
			sizeof(struct gb_loopback_transfer_request)) {
			retval = -EINVAL;
			goto out_connection_destroy;
		}
		gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
	}

	/* Create per-connection sysfs and debugfs data-points */
	snprintf(name, sizeof(name), "raw_latency_%s",
		 dev_name(&connection->bundle->dev));
	gb->file = debugfs_create_file(name, S_IFREG | S_IRUGO, gb_dev.root, gb,
				       &gb_loopback_debugfs_latency_ops);

	gb->id = ida_simple_get(&loopback_ida, 0, 0, GFP_KERNEL);
	if (gb->id < 0) {
		retval = gb->id;
		goto out_debugfs_remove;
	}

	retval = gb_connection_enable(connection);
	if (retval)
		goto out_ida_remove;

	dev = device_create_with_groups(&loopback_class,
					&connection->bundle->dev,
					MKDEV(0, 0), gb, loopback_groups,
					"gb_loopback%d", gb->id);
	if (IS_ERR(dev)) {
		retval = PTR_ERR(dev);
		goto out_connection_disable;
	}
	gb->dev = dev;

	/* Allocate kfifo */
	if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
			  GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_conn;
	}
	if (kfifo_alloc(&gb->kfifo_ts, kfifo_depth * sizeof(struct timeval) * 2,
			  GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_kfifo0;
	}

	/* Fork worker thread */
	mutex_init(&gb->mutex);
	gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
	if (IS_ERR(gb->task)) {
		retval = PTR_ERR(gb->task);
		goto out_kfifo1;
	}

	/* Publish on the global list under the atomic-context spinlock */
	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_loopback_insert_id(gb);
	gb_dev.count++;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	gb_connection_latency_tag_enable(connection);
	return 0;

out_kfifo1:
	kfifo_free(&gb->kfifo_ts);
out_kfifo0:
	kfifo_free(&gb->kfifo_lat);
out_conn:
	device_unregister(dev);
out_connection_disable:
	gb_connection_disable(connection);
out_ida_remove:
	ida_simple_remove(&loopback_ida, gb->id);
out_debugfs_remove:
	debugfs_remove(gb->file);
out_connection_destroy:
	gb_connection_destroy(connection);
out_kzalloc:
	kfree(gb);

	return retval;
}
1257
/*
 * Bundle disconnect: tear down a loopback connection in reverse order of
 * probe — disable the connection, stop the worker thread, free kfifos
 * and debugfs, drain async operations, drop the global-list entry, then
 * unregister the device and destroy the connection.
 */
static void gb_loopback_disconnect(struct gb_bundle *bundle)
{
	struct gb_loopback *gb = greybus_get_drvdata(bundle);
	unsigned long flags;

	gb_connection_disable(gb->connection);

	/* Task may not exist if probe failed mid-way */
	if (!IS_ERR_OR_NULL(gb->task))
		kthread_stop(gb->task);

	kfifo_free(&gb->kfifo_lat);
	kfifo_free(&gb->kfifo_ts);
	gb_connection_latency_tag_disable(gb->connection);
	debugfs_remove(gb->file);

	/*
	 * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
	 * is disabled at the beginning and so we can't have any more
	 * incoming/outgoing requests.
	 */
	gb_loopback_async_wait_all(gb);

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_dev.count--;
	list_del(&gb->entry);
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	device_unregister(gb->dev);
	ida_simple_remove(&loopback_ida, gb->id);

	gb_connection_destroy(gb->connection);
	kfree(gb);
}
1291
/* Match any bundle advertising the loopback device class. */
static const struct greybus_bundle_id gb_loopback_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_loopback_id_table);
1297
/* Greybus driver registration for the loopback bundle class. */
static struct greybus_driver gb_loopback_driver = {
	.name		= "loopback",
	.probe		= gb_loopback_probe,
	.disconnect	= gb_loopback_disconnect,
	.id_table	= gb_loopback_id_table,
};
1304
1305 static int loopback_init(void)
1306 {
1307         int retval;
1308
1309         INIT_LIST_HEAD(&gb_dev.list);
1310         INIT_LIST_HEAD(&gb_dev.list_op_async);
1311         spin_lock_init(&gb_dev.lock);
1312         gb_dev.root = debugfs_create_dir("gb_loopback", NULL);
1313
1314         retval = class_register(&loopback_class);
1315         if (retval)
1316                 goto err;
1317
1318         retval = greybus_register(&gb_loopback_driver);
1319         if (retval)
1320                 goto err_unregister;
1321
1322         return 0;
1323
1324 err_unregister:
1325         class_unregister(&loopback_class);
1326 err:
1327         debugfs_remove_recursive(gb_dev.root);
1328         return retval;
1329 }
1330 module_init(loopback_init);
1331
/*
 * Module exit: remove debugfs first so readers are gone, then deregister
 * the driver (triggering disconnects), the class, and finally release
 * the id allocator.
 */
static void __exit loopback_exit(void)
{
	debugfs_remove_recursive(gb_dev.root);
	greybus_deregister(&gb_loopback_driver);
	class_unregister(&loopback_class);
	ida_destroy(&loopback_ida);
}
1339 module_exit(loopback_exit);
1340
1341 MODULE_LICENSE("GPL v2");