/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozcdev.h"
#include "ozusbsvc.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>

#define OZ_MAX_TX_POOL_SIZE     6

static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
static void oz_isoc_destructor(struct sk_buff *skb);
static int oz_def_app_init(void);
static void oz_def_app_term(void);
static int oz_def_app_start(struct oz_pd *pd, int resume);
static void oz_def_app_stop(struct oz_pd *pd, int pause);
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);

/*
 * Counts the uncompleted isoc frames submitted to netcard.
 */
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);

/* Application handler functions.
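 * Indexed by application id: oz_handle_app_elt() looks up
 * g_app_if[app_id - 1], so entries must stay in OZ_APPID_* order.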
 */
static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
        {oz_usb_init,
        oz_usb_term,
        oz_usb_start,
        oz_usb_stop,
        oz_usb_rx,
        oz_usb_heartbeat,
        oz_usb_farewell,
        OZ_APPID_USB},

        {oz_def_app_init,
        oz_def_app_term,
        oz_def_app_start,
        oz_def_app_stop,
        oz_def_app_rx,
        NULL,
        NULL,
        OZ_APPID_UNUSED1},

        {oz_def_app_init,
        oz_def_app_term,
        oz_def_app_start,
        oz_def_app_stop,
        oz_def_app_rx,
        NULL,
        NULL,
        OZ_APPID_UNUSED2},

        {oz_cdev_init,
        oz_cdev_term,
        oz_cdev_start,
        oz_cdev_stop,
        oz_cdev_rx,
        NULL,
        NULL,
        OZ_APPID_SERIAL},
};

/*
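 * Default no-op handlers used for the unused application ids.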
 * Context: process
 */
static int oz_def_app_init(void)
{
        return 0;
}

/*
 * Context: process
 */
static void oz_def_app_term(void)
{
}

/*
 * Context: softirq
 */
static int oz_def_app_start(struct oz_pd *pd, int resume)
{
        return 0;
}

/*
 * Context: softirq
 */
static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
}

/*
 * Context: softirq
 */
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
}

/*
 * Context: softirq or process
 */
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
        pd->state = state;
        switch (state) {
        case OZ_PD_S_IDLE:
                oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_IDLE\n");
                break;
        case OZ_PD_S_CONNECTED:
                oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_CONNECTED\n");
                break;
        case OZ_PD_S_STOPPED:
                oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_STOPPED\n");
                break;
        case OZ_PD_S_SLEEP:
                oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_SLEEP\n");
                break;
        }
}

/*
 * Context: softirq or process
 */
void oz_pd_get(struct oz_pd *pd)
{
        atomic_inc(&pd->ref_count);
}

/*
 * Context: softirq or process
 */
void oz_pd_put(struct oz_pd *pd)
{
        if (atomic_dec_and_test(&pd->ref_count))
                oz_pd_destroy(pd);
}

/*
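 * Allocate and initialise a new PD; the caller receives it with a
 * reference count of two.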
 * Context: softirq-serialized
 */
struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
{
        struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);

        if (pd) {
                int i;

                atomic_set(&pd->ref_count, 2);
                for (i = 0; i < OZ_APPID_MAX; i++)
                        spin_lock_init(&pd->app_lock[i]);
                pd->last_rx_pkt_num = 0xffffffff;
                oz_pd_set_state(pd, OZ_PD_S_IDLE);
                pd->max_tx_size = OZ_MAX_TX_SIZE;
                memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
                if (0 != oz_elt_buf_init(&pd->elt_buff)) {
                        kfree(pd);
                        return NULL;
                }
                spin_lock_init(&pd->tx_frame_lock);
                INIT_LIST_HEAD(&pd->tx_queue);
                INIT_LIST_HEAD(&pd->farewell_list);
                pd->last_sent_frame = &pd->tx_queue;
                spin_lock_init(&pd->stream_lock);
                INIT_LIST_HEAD(&pd->stream_list);
                tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
                                                        (unsigned long)pd);
                tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
                                                        (unsigned long)pd);
                hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                pd->heartbeat.function = oz_pd_heartbeat_event;
                pd->timeout.function = oz_pd_timeout_event;
        }
        return pd;
}

/*
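 * Work item that tears a PD down: kills its tasklets and frees any
 * streams, queued TX frames, farewells and pooled TX frames.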
 * Context: softirq or process
 */
static void oz_pd_free(struct work_struct *work)
{
        struct list_head *e;
        struct oz_tx_frame *f;
        struct oz_isoc_stream *st;
        struct oz_farewell *fwell;
        struct oz_pd *pd;

        pd = container_of(work, struct oz_pd, workitem);
        oz_pd_dbg(pd, ON, "Destroying PD\n");
        /* Disable timer tasklets. */
        tasklet_kill(&pd->heartbeat_tasklet);
        tasklet_kill(&pd->timeout_tasklet);
        /* Delete any streams.
         */
        e = pd->stream_list.next;
        while (e != &pd->stream_list) {
                st = container_of(e, struct oz_isoc_stream, link);
                e = e->next;
                oz_isoc_stream_free(st);
        }
        /* Free any queued tx frames.
         */
        e = pd->tx_queue.next;
        while (e != &pd->tx_queue) {
                f = container_of(e, struct oz_tx_frame, link);
                e = e->next;
                if (f->skb != NULL)
                        kfree_skb(f->skb);
                oz_retire_frame(pd, f);
        }
        oz_elt_buf_term(&pd->elt_buff);
        /* Free any farewells.
         */
        e = pd->farewell_list.next;
        while (e != &pd->farewell_list) {
                fwell = container_of(e, struct oz_farewell, link);
                e = e->next;
                kfree(fwell);
        }
        /* Deallocate all frames in tx pool.
         */
        while (pd->tx_pool) {
                e = pd->tx_pool;
                pd->tx_pool = e->next;
                kfree(container_of(e, struct oz_tx_frame, link));
        }
        if (pd->net_dev)
                dev_put(pd->net_dev);
        kfree(pd);
}

/*
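 * Cancel the PD's timers and schedule oz_pd_free() to release its resources.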
 * Context: softirq or process
 */
void oz_pd_destroy(struct oz_pd *pd)
{
        if (hrtimer_active(&pd->timeout))
                hrtimer_cancel(&pd->timeout);
        if (hrtimer_active(&pd->heartbeat))
                hrtimer_cancel(&pd->heartbeat);

        INIT_WORK(&pd->workitem, oz_pd_free);
        if (!schedule_work(&pd->workitem))
                oz_pd_dbg(pd, ON, "failed to schedule workitem\n");
}

/*
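 * Start the application services selected by the 'apps' bit mask and mark
 * them in pd->total_apps (clearing their paused_apps bits on resume).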
 * Context: softirq-serialized
 */
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
        const struct oz_app_if *ai;
        int rc = 0;

        oz_pd_dbg(pd, ON, "%s: (0x%x) resume(%d)\n", __func__, apps, resume);
        for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
                if (apps & (1<<ai->app_id)) {
                        if (ai->start(pd, resume)) {
                                rc = -1;
                                oz_pd_dbg(pd, ON,
                                          "Unable to start service %d\n",
                                          ai->app_id);
                                break;
                        }
                        oz_polling_lock_bh();
                        pd->total_apps |= (1<<ai->app_id);
                        if (resume)
                                pd->paused_apps &= ~(1<<ai->app_id);
                        oz_polling_unlock_bh();
                }
        }
        return rc;
}

/*
 * Context: softirq or process
 */
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
        const struct oz_app_if *ai;

        oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause);
        for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
                if (apps & (1<<ai->app_id)) {
                        oz_polling_lock_bh();
                        if (pause) {
                                pd->paused_apps |= (1<<ai->app_id);
                        } else {
                                pd->total_apps &= ~(1<<ai->app_id);
                                pd->paused_apps &= ~(1<<ai->app_id);
                        }
                        oz_polling_unlock_bh();
                        ai->stop(pd, pause);
                }
        }
}

/*
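 * Run the heartbeat handler of every application selected in 'apps' and
 * cancel the heartbeat timer once no handler asks for another beat.  In
 * ISOC "anytime" mode, also try to send up to eight isochronous frames.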
 * Context: softirq
 */
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
        const struct oz_app_if *ai;
        int more = 0;

        for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
                if (ai->heartbeat && (apps & (1<<ai->app_id))) {
                        if (ai->heartbeat(pd))
                                more = 1;
                }
        }
        if ((!more) && (hrtimer_active(&pd->heartbeat)))
                hrtimer_cancel(&pd->heartbeat);
        if (pd->mode & OZ_F_ISOC_ANYTIME) {
                int count = 8;

                while (count-- && (oz_send_isoc_frame(pd) >= 0))
                        ;
        }
}

/*
 * Context: softirq or process
 */
void oz_pd_stop(struct oz_pd *pd)
{
        u16 stop_apps;

        oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state);
        oz_pd_indicate_farewells(pd);
        oz_polling_lock_bh();
        stop_apps = pd->total_apps;
        pd->total_apps = 0;
        pd->paused_apps = 0;
        oz_polling_unlock_bh();
        oz_services_stop(pd, stop_apps, 0);
        oz_polling_lock_bh();
        oz_pd_set_state(pd, OZ_PD_S_STOPPED);
        /* Remove from PD list. */
        list_del(&pd->link);
        oz_polling_unlock_bh();
        oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count));
        oz_pd_put(pd);
}

/*
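 * Put the PD to sleep if it has a keep-alive interval and a session id,
 * otherwise stop it.  Returns non-zero when the PD was stopped.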
 * Context: softirq
 */
int oz_pd_sleep(struct oz_pd *pd)
{
        int do_stop = 0;
        u16 stop_apps;

        oz_polling_lock_bh();
        if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
                oz_polling_unlock_bh();
                return 0;
        }
        if (pd->keep_alive && pd->session_id)
                oz_pd_set_state(pd, OZ_PD_S_SLEEP);
        else
                do_stop = 1;

        stop_apps = pd->total_apps;
        oz_polling_unlock_bh();
        if (do_stop) {
                oz_pd_stop(pd);
        } else {
                oz_services_stop(pd, stop_apps, 1);
                oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
        }
        return do_stop;
}

/*
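 * Take a TX frame from the per-PD pool, or fall back to kmalloc() when
 * the pool is empty.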
 * Context: softirq
 */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
        struct oz_tx_frame *f = NULL;

        spin_lock_bh(&pd->tx_frame_lock);
        if (pd->tx_pool) {
                f = container_of(pd->tx_pool, struct oz_tx_frame, link);
                pd->tx_pool = pd->tx_pool->next;
                pd->tx_pool_count--;
        }
        spin_unlock_bh(&pd->tx_frame_lock);
        if (f == NULL)
                f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
        if (f) {
                f->total_size = sizeof(struct oz_hdr);
                INIT_LIST_HEAD(&f->link);
                INIT_LIST_HEAD(&f->elt_list);
        }
        return f;
}

/*
 * Context: softirq or process
 */
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
        pd->nb_queued_isoc_frames--;
        list_del_init(&f->link);
        if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
                f->link.next = pd->tx_pool;
                pd->tx_pool = &f->link;
                pd->tx_pool_count++;
        } else {
                kfree(f);
        }
        oz_dbg(TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
               pd->nb_queued_isoc_frames);
}

/*
 * Context: softirq or process
 */
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
        spin_lock_bh(&pd->tx_frame_lock);
        if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
                f->link.next = pd->tx_pool;
                pd->tx_pool = &f->link;
                pd->tx_pool_count++;
                f = NULL;
        }
        spin_unlock_bh(&pd->tx_frame_lock);
        kfree(f);
}

/*
 * Context: softirq-serialized
 */
static void oz_set_more_bit(struct sk_buff *skb)
{
        struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

        oz_hdr->control |= OZ_F_MORE_DATA;
}

/*
 * Context: softirq-serialized
 */
static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
        struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

        oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
}

/*
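 * Build a new TX frame descriptor, fill it from the buffered elements
 * unless 'empty' is set, and append it to the PD's tx_queue.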
 * Context: softirq
 */
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
        struct oz_tx_frame *f;

        if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
                return -1;
        if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
                return -1;
        if (!empty && !oz_are_elts_available(&pd->elt_buff))
                return -1;
        f = oz_tx_frame_alloc(pd);
        if (f == NULL)
                return -1;
        f->skb = NULL;
        f->hdr.control =
                (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
        ++pd->last_tx_pkt_num;
        put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
        if (empty == 0) {
                oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
                        pd->max_tx_size, &f->elt_list);
        }
        spin_lock(&pd->tx_frame_lock);
        list_add_tail(&f->link, &pd->tx_queue);
        pd->nb_queued_frames++;
        spin_unlock(&pd->tx_frame_lock);
        return 0;
}

/*
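 * Construct the sk_buff for a TX frame: link-layer header, oz header and
 * a copy of every element on the frame's element list.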
 * Context: softirq-serialized
 */
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
        struct sk_buff *skb;
        struct net_device *dev = pd->net_dev;
        struct oz_hdr *oz_hdr;
        struct oz_elt *elt;
        struct list_head *e;

        /* Allocate skb with enough space for the lower layers as well
         * as the space we need.
         */
        skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
        if (skb == NULL)
                return NULL;
        /* Reserve the head room for lower layers.
         */
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        skb_reset_network_header(skb);
        skb->dev = dev;
        skb->protocol = htons(OZ_ETHERTYPE);
        if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
                dev->dev_addr, skb->len) < 0)
                goto fail;
        /* Push the tail to the end of the area we are going to copy to.
         */
        oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
        f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
        memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
        /* Copy the elements into the frame body.
         */
        elt = (struct oz_elt *)(oz_hdr+1);
        for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
                struct oz_elt_info *ei;

                ei = container_of(e, struct oz_elt_info, link);
                memcpy(elt, ei->data, ei->length);
                elt = oz_next_elt(elt);
        }
        return skb;
fail:
        kfree_skb(skb);
        return NULL;
}

/*
 * Context: softirq or process
 */
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
        struct list_head *e;
        struct oz_elt_info *ei;

        e = f->elt_list.next;
        while (e != &f->elt_list) {
                ei = container_of(e, struct oz_elt_info, link);
                e = e->next;
                list_del_init(&ei->link);
                if (ei->callback)
                        ei->callback(pd, ei->context);
                spin_lock_bh(&pd->elt_buff.lock);
                oz_elt_info_free(&pd->elt_buff, ei);
                spin_unlock_bh(&pd->elt_buff.lock);
        }
        oz_tx_frame_free(pd, f);
        if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
                oz_trim_elt_pool(&pd->elt_buff);
}

/*
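 * Transmit the frame that follows last_sent_frame on the TX queue.  An
 * ISOC frame already carries a built skb and is released once submitted.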
 * Context: softirq-serialized
 */
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
{
        struct sk_buff *skb;
        struct oz_tx_frame *f;
        struct list_head *e;

        spin_lock(&pd->tx_frame_lock);
        e = pd->last_sent_frame->next;
        if (e == &pd->tx_queue) {
                spin_unlock(&pd->tx_frame_lock);
                return -1;
        }
        f = container_of(e, struct oz_tx_frame, link);

        if (f->skb != NULL) {
                skb = f->skb;
                oz_tx_isoc_free(pd, f);
                spin_unlock(&pd->tx_frame_lock);
                if (more_data)
                        oz_set_more_bit(skb);
                oz_set_last_pkt_nb(pd, skb);
                if ((int)atomic_read(&g_submitted_isoc) <
                                                        OZ_MAX_SUBMITTED_ISOC) {
                        if (dev_queue_xmit(skb) < 0) {
                                oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
                                return -1;
                        }
                        atomic_inc(&g_submitted_isoc);
                        oz_dbg(TX_FRAMES, "Sending ISOC Frame, nb_isoc= %d\n",
                               pd->nb_queued_isoc_frames);
                        return 0;
                } else {
                        kfree_skb(skb);
                        oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
                        return -1;
                }
        }

        pd->last_sent_frame = e;
        skb = oz_build_frame(pd, f);
        spin_unlock(&pd->tx_frame_lock);
        if (!skb)
                return -1;
        if (more_data)
                oz_set_more_bit(skb);
        oz_dbg(TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
        if (dev_queue_xmit(skb) < 0)
                return -1;

        return 0;
}

/*
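 * Prepare frames from any buffered elements and transmit up to 'backlog'
 * queued frames, adjusting the budget for the PD's isochronous mode.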
 * Context: softirq-serialized
 */
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
        while (oz_prepare_frame(pd, 0) >= 0)
                backlog++;

        switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {
        case OZ_F_ISOC_NO_ELTS: {
                backlog += pd->nb_queued_isoc_frames;
                if (backlog <= 0)
                        goto out;
                if (backlog > OZ_MAX_SUBMITTED_ISOC)
                        backlog = OZ_MAX_SUBMITTED_ISOC;
                break;
        }
        case OZ_NO_ELTS_ANYTIME: {
                if ((backlog <= 0) && (pd->isoc_sent == 0))
                        goto out;
                break;
        }
        default: {
                if (backlog <= 0)
                        goto out;
                break;
        }
        }
        while (backlog--) {
                if (oz_send_next_queued_frame(pd, backlog) < 0)
                        break;
        }
        return;

out:    oz_prepare_frame(pd, 1);
        oz_send_next_queued_frame(pd, 0);
}

/*
 * Context: softirq
 */
static int oz_send_isoc_frame(struct oz_pd *pd)
{
        struct sk_buff *skb;
        struct net_device *dev = pd->net_dev;
        struct oz_hdr *oz_hdr;
        struct oz_elt *elt;
        struct list_head *e;
        struct list_head list;
        int total_size = sizeof(struct oz_hdr);

        INIT_LIST_HEAD(&list);

        oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
                pd->max_tx_size, &list);
        if (list.next == &list)
                return 0;
        skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
        if (skb == NULL) {
                oz_dbg(ON, "Cannot alloc skb\n");
                oz_elt_info_free_chain(&pd->elt_buff, &list);
                return -1;
        }
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        skb_reset_network_header(skb);
        skb->dev = dev;
        skb->protocol = htons(OZ_ETHERTYPE);
        if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
                dev->dev_addr, skb->len) < 0) {
                kfree_skb(skb);
                return -1;
        }
        oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
        oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
        oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
        elt = (struct oz_elt *)(oz_hdr+1);

        for (e = list.next; e != &list; e = e->next) {
                struct oz_elt_info *ei;

                ei = container_of(e, struct oz_elt_info, link);
                memcpy(elt, ei->data, ei->length);
                elt = oz_next_elt(elt);
        }
        dev_queue_xmit(skb);
        oz_elt_info_free_chain(&pd->elt_buff, &list);
        return 0;
}

/*
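 * Release every queued TX frame up to and including the packet number
 * 'lpn' acknowledged by the peer, then rewind last_sent_frame to the
 * queue head.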
 * Context: softirq-serialized
 */
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
        struct list_head *e;
        struct oz_tx_frame *f;
        struct list_head *first = NULL;
        struct list_head *last = NULL;
        u8 diff;
        u32 pkt_num;

        spin_lock(&pd->tx_frame_lock);
        e = pd->tx_queue.next;
        while (e != &pd->tx_queue) {
                f = container_of(e, struct oz_tx_frame, link);
                pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
                diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
                if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
                        break;
                oz_dbg(TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
                       pkt_num, pd->nb_queued_frames);
                if (first == NULL)
                        first = e;
                last = e;
                e = e->next;
                pd->nb_queued_frames--;
        }
        if (first) {
                last->next->prev = &pd->tx_queue;
                pd->tx_queue.next = last->next;
                last->next = NULL;
        }
        pd->last_sent_frame = &pd->tx_queue;
        spin_unlock(&pd->tx_frame_lock);
        while (first) {
                f = container_of(first, struct oz_tx_frame, link);
                first = first->next;
                oz_retire_frame(pd, f);
        }
}

/*
 * Precondition: stream_lock must be held.
 * Context: softirq
 */
static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
{
        struct list_head *e;
        struct oz_isoc_stream *st;

        list_for_each(e, &pd->stream_list) {
                st = container_of(e, struct oz_isoc_stream, link);
                if (st->ep_num == ep_num)
                        return st;
        }
        return NULL;
}

/*
 * Context: softirq
 */
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
{
        struct oz_isoc_stream *st =
                kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);

        if (!st)
                return -ENOMEM;
        st->ep_num = ep_num;
        spin_lock_bh(&pd->stream_lock);
        if (!pd_stream_find(pd, ep_num)) {
                list_add(&st->link, &pd->stream_list);
                st = NULL;
        }
        spin_unlock_bh(&pd->stream_lock);
        kfree(st);
        return 0;
}

/*
 * Context: softirq or process
 */
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
{
        kfree_skb(st->skb);
        kfree(st);
}

/*
 * Context: softirq
 */
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
        struct oz_isoc_stream *st;

        spin_lock_bh(&pd->stream_lock);
        st = pd_stream_find(pd, ep_num);
        if (st)
                list_del(&st->link);
        spin_unlock_bh(&pd->stream_lock);
        if (st)
                oz_isoc_stream_free(st);
        return 0;
}

/*
 * Context: any
 */
static void oz_isoc_destructor(struct sk_buff *skb)
{
        atomic_dec(&g_submitted_isoc);
}

/*
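 * Append one isochronous data unit to the stream's pending skb; once
 * ms_per_isoc units have accumulated, transmit the skb (queued on
 * tx_queue unless the PD is in ISOC "anytime" mode).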
 * Context: softirq
 */
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
{
        struct net_device *dev = pd->net_dev;
        struct oz_isoc_stream *st;
        u8 nb_units = 0;
        struct sk_buff *skb = NULL;
        struct oz_hdr *oz_hdr = NULL;
        int size = 0;

        spin_lock_bh(&pd->stream_lock);
        st = pd_stream_find(pd, ep_num);
        if (st) {
                skb = st->skb;
                st->skb = NULL;
                nb_units = st->nb_units;
                st->nb_units = 0;
                oz_hdr = st->oz_hdr;
                size = st->size;
        }
        spin_unlock_bh(&pd->stream_lock);
        if (!st)
                return 0;
        if (!skb) {
                /* Allocate enough space for max size frame. */
                skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
                                GFP_ATOMIC);
                if (skb == NULL)
                        return 0;
                /* Reserve the head room for lower layers. */
                skb_reserve(skb, LL_RESERVED_SPACE(dev));
                skb_reset_network_header(skb);
                skb->dev = dev;
                skb->protocol = htons(OZ_ETHERTYPE);
                /* For audio packet set priority to AC_VO */
                skb->priority = 0x7;
                size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
                oz_hdr = (struct oz_hdr *)skb_put(skb, size);
        }
        memcpy(skb_put(skb, len), data, len);
        size += len;
        if (++nb_units < pd->ms_per_isoc) {
                spin_lock_bh(&pd->stream_lock);
                st->skb = skb;
                st->nb_units = nb_units;
                st->oz_hdr = oz_hdr;
                st->size = size;
                spin_unlock_bh(&pd->stream_lock);
        } else {
                struct oz_hdr oz;
                struct oz_isoc_large iso;

                spin_lock_bh(&pd->stream_lock);
                iso.frame_number = st->frame_num;
                st->frame_num += nb_units;
                spin_unlock_bh(&pd->stream_lock);
                oz.control =
                        (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
                oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
                oz.pkt_num = 0;
                iso.endpoint = ep_num;
                iso.format = OZ_DATA_F_ISOC_LARGE;
                iso.ms_data = nb_units;
                memcpy(oz_hdr, &oz, sizeof(oz));
                memcpy(oz_hdr+1, &iso, sizeof(iso));
                if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
                                dev->dev_addr, skb->len) < 0)
                        goto out;

                skb->destructor = oz_isoc_destructor;
                /* Queue for Xmit if mode is not ANYTIME. */
                if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
                        struct oz_tx_frame *isoc_unit = NULL;
                        int nb = pd->nb_queued_isoc_frames;

                        if (nb >= pd->isoc_latency) {
                                struct list_head *e;
                                struct oz_tx_frame *f;

                                oz_dbg(TX_FRAMES, "Dropping ISOC Unit nb= %d\n",
                                       nb);
                                spin_lock(&pd->tx_frame_lock);
                                list_for_each(e, &pd->tx_queue) {
                                        f = container_of(e, struct oz_tx_frame,
                                                                        link);
                                        if (f->skb != NULL) {
                                                oz_tx_isoc_free(pd, f);
                                                break;
                                        }
                                }
                                spin_unlock(&pd->tx_frame_lock);
                        }
                        isoc_unit = oz_tx_frame_alloc(pd);
                        if (isoc_unit == NULL)
                                goto out;
                        isoc_unit->hdr = oz;
                        isoc_unit->skb = skb;
                        spin_lock_bh(&pd->tx_frame_lock);
                        list_add_tail(&isoc_unit->link, &pd->tx_queue);
                        pd->nb_queued_isoc_frames++;
                        spin_unlock_bh(&pd->tx_frame_lock);
                        oz_dbg(TX_FRAMES,
                               "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
                               pd->nb_queued_isoc_frames, pd->nb_queued_frames);
                        return 0;
                }

                /* In ANYTIME mode Xmit unit immediately. */
                if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
                        atomic_inc(&g_submitted_isoc);
                        if (dev_queue_xmit(skb) < 0)
                                return -1;
                        else
                                return 0;
                }

out:    kfree_skb(skb);
        return -1;

        }
        return 0;
}

/*
 * Context: process
 */
void oz_apps_init(void)
{
        int i;

        for (i = 0; i < OZ_APPID_MAX; i++)
                if (g_app_if[i].init)
                        g_app_if[i].init();
}

/*
 * Context: process
 */
void oz_apps_term(void)
{
        int i;

        /* Terminate all the apps. */
        for (i = 0; i < OZ_APPID_MAX; i++)
                if (g_app_if[i].term)
                        g_app_if[i].term();
}

/*
 * Context: softirq-serialized
 */
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
        const struct oz_app_if *ai;

        if (app_id == 0 || app_id > OZ_APPID_MAX)
                return;
        ai = &g_app_if[app_id-1];
        ai->rx(pd, elt);
}

/*
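 * Hand any queued farewell reports to the USB service's farewell handler
 * and free them.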
 * Context: softirq or process
 */
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
        struct oz_farewell *f;
        const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];

        while (1) {
                oz_polling_lock_bh();
                if (list_empty(&pd->farewell_list)) {
                        oz_polling_unlock_bh();
                        break;
                }
                f = list_first_entry(&pd->farewell_list,
                                struct oz_farewell, link);
                list_del(&f->link);
                oz_polling_unlock_bh();
                if (ai->farewell)
                        ai->farewell(pd, f->ep_num, f->report, f->len);
                kfree(f);
        }
}