/*
 * SuperH IrDA Driver
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 *
 * Based on sh_sir.c
 * Copyright (C) 2009 Renesas Solutions Corp.
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * CAUTION
 *
 * This driver is very simple, so the following features are not
 * supported yet:
 *  - MIR/FIR support
 *  - DMA transfer support
 *  - FIFO mode support
 */
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#define DRIVER_NAME "sh_irda"

#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
#define __IRDARAM_LEN   0x13FF
#else
#define __IRDARAM_LEN   0x1039
#endif

#define IRTMR           0x1F00 /* Transfer mode */
#define IRCFR           0x1F02 /* Configuration */
#define IRCTR           0x1F04 /* IR control */
#define IRTFLR          0x1F20 /* Transmit frame length */
#define IRTCTR          0x1F22 /* Transmit control */
#define IRRFLR          0x1F40 /* Receive frame length */
#define IRRCTR          0x1F42 /* Receive control */
#define SIRISR          0x1F60 /* SIR-UART mode interrupt source */
#define SIRIMR          0x1F62 /* SIR-UART mode interrupt mask */
#define SIRICR          0x1F64 /* SIR-UART mode interrupt clear */
#define SIRBCR          0x1F68 /* SIR-UART mode baud rate count */
#define MFIRISR         0x1F70 /* MIR/FIR mode interrupt source */
#define MFIRIMR         0x1F72 /* MIR/FIR mode interrupt mask */
#define MFIRICR         0x1F74 /* MIR/FIR mode interrupt clear */
#define CRCCTR          0x1F80 /* CRC engine control */
#define CRCIR           0x1F86 /* CRC engine input data */
#define CRCCR           0x1F8A /* CRC engine calculation */
#define CRCOR           0x1F8E /* CRC engine output data */
#define FIFOCP          0x1FC0 /* FIFO current pointer */
#define FIFOFP          0x1FC2 /* FIFO follow pointer */
#define FIFORSMSK       0x1FC4 /* FIFO receive status mask */
#define FIFORSOR        0x1FC6 /* FIFO receive status OR */
#define FIFOSEL         0x1FC8 /* FIFO select */
#define FIFORS          0x1FCA /* FIFO receive status */
#define FIFORFL         0x1FCC /* FIFO receive frame length */
#define FIFORAMCP       0x1FCE /* FIFO RAM current pointer */
#define FIFORAMFP       0x1FD0 /* FIFO RAM follow pointer */
#define BIFCTL          0x1FD2 /* BUS interface control */
#define IRDARAM         0x0000 /* IrDA buffer RAM */
#define IRDARAM_LEN     __IRDARAM_LEN /* - 8/16/32 (read-only for 32) */

/* IRTMR */
#define TMD_MASK        (0x3 << 14) /* Transfer Mode */
#define TMD_SIR         (0x0 << 14)
#define TMD_MIR         (0x3 << 14)
#define TMD_FIR         (0x2 << 14)

#define FIFORIM         (1 << 8) /* FIFO receive interrupt mask */
#define MIM             (1 << 4) /* MIR/FIR Interrupt Mask */
#define SIM             (1 << 0) /* SIR Interrupt Mask */
#define xIM_MASK        (FIFORIM | MIM | SIM)

/* IRCFR */
#define RTO_SHIFT       8 /* shift for Receive Timeout */
#define RTO             (0x3 << RTO_SHIFT)

/* IRTCTR */
#define ARMOD           (1 << 15) /* Auto-Receive Mode */
#define TE              (1 <<  0) /* Transmit Enable */

/* IRRFLR */
#define RFL_MASK        (0x1FFF) /* mask for Receive Frame Length */

/* IRRCTR */
#define RE              (1 <<  0) /* Receive Enable */

/*
 * SIRISR,  SIRIMR,  SIRICR,
 * MFIRISR, MFIRIMR, MFIRICR
 */
#define FRE             (1 << 15) /* Frame Receive End */
#define TROV            (1 << 11) /* Transfer Area Overflow */
#define xIR_9           (1 << 9)
#define TOT             xIR_9     /* for SIR     Timeout */
#define ABTD            xIR_9     /* for MIR/FIR Abort Detection */
#define xIR_8           (1 << 8)
#define FER             xIR_8     /* for SIR     Framing Error */
#define CRCER           xIR_8     /* for MIR/FIR CRC error */
#define FTE             (1 << 7)  /* Frame Transmit End */
#define xIR_MASK        (FRE | TROV | xIR_9 | xIR_8 | FTE)

/* SIRBCR */
#define BRC_MASK        (0x3F) /* mask for Baud Rate Count */

/* CRCCTR */
#define CRC_RST         (1 << 15) /* CRC Engine Reset */
#define CRC_CT_MASK     0x0FFF    /* mask for CRC Engine Input Data Count */

/* CRCIR */
#define CRC_IN_MASK     0x0FFF    /* mask for CRC Engine Input Data */

/************************************************************************


                        enum / structure


************************************************************************/
enum sh_irda_mode {
        SH_IRDA_NONE = 0,
        SH_IRDA_SIR,
        SH_IRDA_MIR,
        SH_IRDA_FIR,
};

struct sh_irda_self;
struct sh_irda_xir_func {
        int (*xir_fre)  (struct sh_irda_self *self);
        int (*xir_trov) (struct sh_irda_self *self);
        int (*xir_9)    (struct sh_irda_self *self);
        int (*xir_8)    (struct sh_irda_self *self);
        int (*xir_fte)  (struct sh_irda_self *self);
};

struct sh_irda_self {
        void __iomem            *membase;
        unsigned int            irq;
        struct clk              *clk;

        struct net_device       *ndev;

        struct irlap_cb         *irlap;
        struct qos_info         qos;

        iobuff_t                tx_buff;
        iobuff_t                rx_buff;

        enum sh_irda_mode       mode;
        spinlock_t              lock;

        struct sh_irda_xir_func *xir_func;
};

/************************************************************************


                        common function


************************************************************************/
static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
{
        unsigned long flags;

        spin_lock_irqsave(&self->lock, flags);
        iowrite16(data, self->membase + offset);
        spin_unlock_irqrestore(&self->lock, flags);
}

static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
{
        unsigned long flags;
        u16 ret;

        spin_lock_irqsave(&self->lock, flags);
        ret = ioread16(self->membase + offset);
        spin_unlock_irqrestore(&self->lock, flags);

        return ret;
}

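/*
 * read-modify-write helper: the bits selected by @mask are replaced by
 * @data while all other bits are preserved, and the register is only
 * written back when the value actually changes.
 */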
static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
                               u16 mask, u16 data)
{
        unsigned long flags;
        u16 old, new;

        spin_lock_irqsave(&self->lock, flags);
        old = ioread16(self->membase + offset);
        new = (old & ~mask) | data;
        if (old != new)
                iowrite16(new, self->membase + offset);
        spin_unlock_irqrestore(&self->lock, flags);
}

/************************************************************************


                        mode function


************************************************************************/
/*=====================================
 *
 *              common
 *
 *=====================================*/
static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
{
        struct device *dev = &self->ndev->dev;

        sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
        dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
}

static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
{
        struct device *dev = &self->ndev->dev;

        if (SH_IRDA_SIR != self->mode)
                interval = 0;

        if (interval < 0 || interval > 2) {
                dev_err(dev, "unsupported timeout interval\n");
                return -EINVAL;
        }

        sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
        return 0;
}

static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
{
        struct device *dev = &self->ndev->dev;
        u16 val;

        if (baudrate < 0)
                return 0;

        if (SH_IRDA_SIR != self->mode) {
                dev_err(dev, "not in SIR mode\n");
                return -EINVAL;
        }

        /*
         * Baud rate (bit/s) =
         *   (48 MHz / 26) / ((baud rate counter value + 1) * 16)
         */
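        /*
         * For example, at 9600 bit/s the integer math below yields
         * val = 48000000 / 26 / 16 / 9600 - 1 = 12 - 1 = 11, giving an
         * actual rate of (48 MHz / 26) / (12 * 16) ~= 9615 bit/s.
         */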
        val = (48000000 / 26 / 16 / baudrate) - 1;
        dev_dbg(dev, "baudrate = %d,  val = 0x%02x\n", baudrate, val);

        sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);

        return 0;
}

static int sh_irda_get_rcv_length(struct sh_irda_self *self)
{
        return RFL_MASK & sh_irda_read(self, IRRFLR);
}

/*=====================================
 *
 *              NONE MODE
 *
 *=====================================*/
static int sh_irda_xir_fre(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;
        dev_err(dev, "none mode: frame recv\n");
        return 0;
}

static int sh_irda_xir_trov(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;
        dev_err(dev, "none mode: buffer ram over\n");
        return 0;
}

static int sh_irda_xir_9(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;
        dev_err(dev, "none mode: time over\n");
        return 0;
}

static int sh_irda_xir_8(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;
        dev_err(dev, "none mode: framing error\n");
        return 0;
}

static int sh_irda_xir_fte(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;
        dev_err(dev, "none mode: frame transmit end\n");
        return 0;
}

static struct sh_irda_xir_func sh_irda_xir_func = {
        .xir_fre        = sh_irda_xir_fre,
        .xir_trov       = sh_irda_xir_trov,
        .xir_9          = sh_irda_xir_9,
        .xir_8          = sh_irda_xir_8,
        .xir_fte        = sh_irda_xir_fte,
};

/*=====================================
 *
 *              MIR/FIR MODE
 *
 * MIR/FIR are not supported now
 *=====================================*/
static struct sh_irda_xir_func sh_irda_mfir_func = {
        .xir_fre        = sh_irda_xir_fre,
        .xir_trov       = sh_irda_xir_trov,
        .xir_9          = sh_irda_xir_9,
        .xir_8          = sh_irda_xir_8,
        .xir_fte        = sh_irda_xir_fte,
};

/*=====================================
 *
 *              SIR MODE
 *
 *=====================================*/
static int sh_irda_sir_fre(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;
        u16 data16;
        u8  *data = (u8 *)&data16;
        int len = sh_irda_get_rcv_length(self);
        int i, j;

        if (len > IRDARAM_LEN)
                len = IRDARAM_LEN;

        dev_dbg(dev, "frame recv length = %d\n", len);

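        /*
         * IRDARAM is read 16 bits at a time: fetch a new word on each
         * even byte offset, then feed both bytes of that word to the
         * SIR unwrapper one by one.
         */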
        for (i = 0; i < len; i++) {
                j = i % 2;
                if (!j)
                        data16 = sh_irda_read(self, IRDARAM + i);

                async_unwrap_char(self->ndev, &self->ndev->stats,
                                  &self->rx_buff, data[j]);
        }
        self->ndev->last_rx = jiffies;

        sh_irda_rcv_ctrl(self, 1);

        return 0;
}

static int sh_irda_sir_trov(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;

        dev_err(dev, "buffer ram over\n");
        sh_irda_rcv_ctrl(self, 1);
        return 0;
}

static int sh_irda_sir_tot(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;

        dev_err(dev, "time over\n");
        sh_irda_set_baudrate(self, 9600);
        sh_irda_rcv_ctrl(self, 1);
        return 0;
}

static int sh_irda_sir_fer(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;

        dev_err(dev, "framing error\n");
        sh_irda_rcv_ctrl(self, 1);
        return 0;
}

static int sh_irda_sir_fte(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;

        dev_dbg(dev, "frame transmit end\n");
        netif_wake_queue(self->ndev);

        return 0;
}

static struct sh_irda_xir_func sh_irda_sir_func = {
        .xir_fre        = sh_irda_sir_fre,
        .xir_trov       = sh_irda_sir_trov,
        .xir_9          = sh_irda_sir_tot,
        .xir_8          = sh_irda_sir_fer,
        .xir_fte        = sh_irda_sir_fte,
};

static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
{
        struct device *dev = &self->ndev->dev;
        struct sh_irda_xir_func *func;
        const char *name;
        u16 data;

        switch (mode) {
        case SH_IRDA_SIR:
                name    = "SIR";
                data    = TMD_SIR;
                func    = &sh_irda_sir_func;
                break;
        case SH_IRDA_MIR:
                name    = "MIR";
                data    = TMD_MIR;
                func    = &sh_irda_mfir_func;
                break;
        case SH_IRDA_FIR:
                name    = "FIR";
                data    = TMD_FIR;
                func    = &sh_irda_mfir_func;
                break;
        default:
                name    = "NONE";
                data    = 0;
                func    = &sh_irda_xir_func;
                break;
        }

        self->mode = mode;
        self->xir_func = func;
        sh_irda_update_bits(self, IRTMR, TMD_MASK, data);

        dev_dbg(dev, "switch to %s mode\n", name);
}

/************************************************************************


                        irq function


************************************************************************/
static void sh_irda_set_irq_mask(struct sh_irda_self *self)
{
        u16 tmr_hole;
        u16 xir_reg;

        /* set all mask */
        sh_irda_update_bits(self, IRTMR,   xIM_MASK, xIM_MASK);
        sh_irda_update_bits(self, SIRIMR,  xIR_MASK, xIR_MASK);
        sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);

        /* clear irq */
        sh_irda_update_bits(self, SIRICR,  xIR_MASK, xIR_MASK);
        sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);

        switch (self->mode) {
        case SH_IRDA_SIR:
                tmr_hole        = SIM;
                xir_reg         = SIRIMR;
                break;
        case SH_IRDA_MIR:
        case SH_IRDA_FIR:
                tmr_hole        = MIM;
                xir_reg         = MFIRIMR;
                break;
        default:
                tmr_hole        = 0;
                xir_reg         = 0;
                break;
        }

        /* open mask */
        if (xir_reg) {
                sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
                sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
        }
}

static irqreturn_t sh_irda_irq(int irq, void *dev_id)
{
        struct sh_irda_self *self = dev_id;
        struct sh_irda_xir_func *func = self->xir_func;
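        /* only the SIR status register is consulted here; the MIR/FIR
         * status register (MFIRISR) is unused since MIR/FIR modes are
         * not supported by this driver */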
        u16 isr = sh_irda_read(self, SIRISR);

        /* clear irq */
        sh_irda_write(self, SIRICR, isr);

        if (isr & FRE)
                func->xir_fre(self);
        if (isr & TROV)
                func->xir_trov(self);
        if (isr & xIR_9)
                func->xir_9(self);
        if (isr & xIR_8)
                func->xir_8(self);
        if (isr & FTE)
                func->xir_fte(self);

        return IRQ_HANDLED;
}

/************************************************************************


                        CRC function


************************************************************************/
static void sh_irda_crc_reset(struct sh_irda_self *self)
{
        sh_irda_write(self, CRCCTR, CRC_RST);
}

static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
{
        sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
}

static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
{
        return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
}

static u16 sh_irda_crc_out(struct sh_irda_self *self)
{
        return sh_irda_read(self, CRCOR);
}

static int sh_irda_crc_init(struct sh_irda_self *self)
{
        struct device *dev = &self->ndev->dev;
        int ret = -EIO;
        u16 val;

        sh_irda_crc_reset(self);

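        /*
         * sanity-check the CRC engine with a known input/output pair:
         * after feeding the four bytes below, the input count must read
         * back as 4 and the CRC result as 0x51DF.
         */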
        sh_irda_crc_add(self, 0xCC);
        sh_irda_crc_add(self, 0xF5);
        sh_irda_crc_add(self, 0xF1);
        sh_irda_crc_add(self, 0xA7);

        val = sh_irda_crc_cnt(self);
        if (4 != val) {
                dev_err(dev, "CRC count error %x\n", val);
                goto crc_init_out;
        }

        val = sh_irda_crc_out(self);
        if (0x51DF != val) {
                dev_err(dev, "CRC result error %x\n", val);
                goto crc_init_out;
        }

        ret = 0;

crc_init_out:

        sh_irda_crc_reset(self);
        return ret;
}

/************************************************************************


                        iobuf function


************************************************************************/
static void sh_irda_remove_iobuf(struct sh_irda_self *self)
{
        kfree(self->rx_buff.head);

        self->tx_buff.head = NULL;
        self->tx_buff.data = NULL;
        self->rx_buff.head = NULL;
        self->rx_buff.data = NULL;
}

static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
{
        if (self->rx_buff.head ||
            self->tx_buff.head) {
                dev_err(&self->ndev->dev, "iobuf already exists\n");
                return -EINVAL;
        }

        /* rx_buff */
        self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
        if (!self->rx_buff.head)
                return -ENOMEM;

        self->rx_buff.truesize  = rxsize;
        self->rx_buff.in_frame  = FALSE;
        self->rx_buff.state     = OUTSIDE_FRAME;
        self->rx_buff.data      = self->rx_buff.head;

        /* tx_buff: maps directly onto the on-chip IRDARAM */
        self->tx_buff.head      = self->membase + IRDARAM;
        self->tx_buff.truesize  = IRDARAM_LEN;

        return 0;
}

/************************************************************************


                        net_device_ops function


************************************************************************/
static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct sh_irda_self *self = netdev_priv(ndev);
        struct device *dev = &self->ndev->dev;
        int speed = irda_get_next_speed(skb);
        int ret;

        dev_dbg(dev, "hard xmit\n");

        netif_stop_queue(ndev);
        sh_irda_rcv_ctrl(self, 0);

        ret = sh_irda_set_baudrate(self, speed);
        if (ret < 0)
                goto sh_irda_hard_xmit_end;

        self->tx_buff.len = 0;
        if (skb->len) {
                unsigned long flags;

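                /*
                 * tx_buff.head points at the on-chip IRDARAM (see
                 * sh_irda_init_iobuf), so the SIR-wrapped frame is
                 * written directly into device RAM here.
                 */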
                spin_lock_irqsave(&self->lock, flags);
                self->tx_buff.len = async_wrap_skb(skb,
                                                   self->tx_buff.head,
                                                   self->tx_buff.truesize);
                spin_unlock_irqrestore(&self->lock, flags);

                if (self->tx_buff.len > self->tx_buff.truesize)
                        self->tx_buff.len = self->tx_buff.truesize;

                sh_irda_write(self, IRTFLR, self->tx_buff.len);
                sh_irda_write(self, IRTCTR, ARMOD | TE);
        } else {
                goto sh_irda_hard_xmit_end;
        }

        dev_kfree_skb(skb);

        return 0;

sh_irda_hard_xmit_end:
        sh_irda_set_baudrate(self, 9600);
        netif_wake_queue(self->ndev);
        sh_irda_rcv_ctrl(self, 1);
        dev_kfree_skb(skb);

        return ret;
}

static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
{
        /*
         * FIXME
         *
         * The irda framework requires this function,
         * but there is nothing to do here yet.
         */
        return 0;
}

static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
{
        struct sh_irda_self *self = netdev_priv(ndev);

        return &self->ndev->stats;
}

static int sh_irda_open(struct net_device *ndev)
{
        struct sh_irda_self *self = netdev_priv(ndev);
        int err;

        clk_enable(self->clk);
        err = sh_irda_crc_init(self);
        if (err)
                goto open_err;

        sh_irda_set_mode(self, SH_IRDA_SIR);
        sh_irda_set_timeout(self, 2);
        sh_irda_set_baudrate(self, 9600);

        self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
        if (!self->irlap) {
                err = -ENODEV;
                goto open_err;
        }

        netif_start_queue(ndev);
        sh_irda_rcv_ctrl(self, 1);
        sh_irda_set_irq_mask(self);

        dev_info(&ndev->dev, "opened\n");

        return 0;

open_err:
        clk_disable(self->clk);

        return err;
}

static int sh_irda_stop(struct net_device *ndev)
{
        struct sh_irda_self *self = netdev_priv(ndev);

        /* Stop IrLAP */
        if (self->irlap) {
                irlap_close(self->irlap);
                self->irlap = NULL;
        }

        netif_stop_queue(ndev);

        dev_info(&ndev->dev, "stopped\n");

        return 0;
}

static const struct net_device_ops sh_irda_ndo = {
        .ndo_open               = sh_irda_open,
        .ndo_stop               = sh_irda_stop,
        .ndo_start_xmit         = sh_irda_hard_xmit,
        .ndo_do_ioctl           = sh_irda_ioctl,
        .ndo_get_stats          = sh_irda_stats,
};

/************************************************************************


                        platform_driver function


************************************************************************/
static int __devinit sh_irda_probe(struct platform_device *pdev)
{
        struct net_device *ndev;
        struct sh_irda_self *self;
        struct resource *res;
        int irq;
        int err = -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
        if (!res || irq < 0) {
                dev_err(&pdev->dev, "Not enough platform resources.\n");
                goto exit;
        }

        ndev = alloc_irdadev(sizeof(*self));
        if (!ndev)
                goto exit;

        self = netdev_priv(ndev);
        self->membase = ioremap_nocache(res->start, resource_size(res));
        if (!self->membase) {
                err = -ENXIO;
                dev_err(&pdev->dev, "Unable to ioremap.\n");
                goto err_mem_1;
        }

        err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
        if (err)
                goto err_mem_2;

        self->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(self->clk)) {
                dev_err(&pdev->dev, "cannot get irda clock\n");
                err = PTR_ERR(self->clk);
                goto err_mem_3;
        }

        irda_init_max_qos_capabilies(&self->qos);

        ndev->netdev_ops        = &sh_irda_ndo;
        ndev->irq               = irq;

        self->ndev                      = ndev;
        self->qos.baud_rate.bits        &= IR_9600; /* FIXME */
        self->qos.min_turn_time.bits    = 1; /* 10 ms or more */
        spin_lock_init(&self->lock);

        irda_qos_bits_to_value(&self->qos);

        err = register_netdev(ndev);
        if (err)
                goto err_mem_4;

        platform_set_drvdata(pdev, ndev);

        err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
        if (err) {
                dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
                unregister_netdev(ndev);
                goto err_mem_4;
        }

        dev_info(&pdev->dev, "SuperH IrDA probed\n");

        goto exit;

err_mem_4:
        clk_put(self->clk);
err_mem_3:
        sh_irda_remove_iobuf(self);
err_mem_2:
        iounmap(self->membase);
err_mem_1:
        free_netdev(ndev);
exit:
        return err;
}

static int __devexit sh_irda_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct sh_irda_self *self = netdev_priv(ndev);

        if (!self)
                return 0;

        unregister_netdev(ndev);
        clk_put(self->clk);
        sh_irda_remove_iobuf(self);
        iounmap(self->membase);
        free_netdev(ndev);
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver sh_irda_driver = {
        .probe  = sh_irda_probe,
        .remove = __devexit_p(sh_irda_remove),
        .driver = {
                .name   = DRIVER_NAME,
        },
};

static int __init sh_irda_init(void)
{
        return platform_driver_register(&sh_irda_driver);
}

static void __exit sh_irda_exit(void)
{
        platform_driver_unregister(&sh_irda_driver);
}

module_init(sh_irda_init);
module_exit(sh_irda_exit);

MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
MODULE_LICENSE("GPL");