1 /*
2  * Driver for Motorola/Freescale IMX serial ports
3  *
4  * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
5  *
6  * Author: Sascha Hauer <sascha@saschahauer.de>
7  * Copyright (C) 2004 Pengutronix
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
21 #define SUPPORT_SYSRQ
22 #endif
23
24 #include <linux/module.h>
25 #include <linux/ioport.h>
26 #include <linux/init.h>
27 #include <linux/console.h>
28 #include <linux/sysrq.h>
29 #include <linux/platform_device.h>
30 #include <linux/tty.h>
31 #include <linux/tty_flip.h>
32 #include <linux/serial_core.h>
33 #include <linux/serial.h>
34 #include <linux/clk.h>
35 #include <linux/delay.h>
36 #include <linux/rational.h>
37 #include <linux/slab.h>
38 #include <linux/of.h>
39 #include <linux/of_device.h>
40 #include <linux/io.h>
41 #include <linux/dma-mapping.h>
42
43 #include <asm/irq.h>
44 #include <linux/platform_data/serial-imx.h>
45 #include <linux/platform_data/dma-imx.h>
46
47 #include "serial_mctrl_gpio.h"
48
49 /* Register definitions */
50 #define URXD0 0x0  /* Receiver Register */
51 #define URTX0 0x40 /* Transmitter Register */
52 #define UCR1  0x80 /* Control Register 1 */
53 #define UCR2  0x84 /* Control Register 2 */
54 #define UCR3  0x88 /* Control Register 3 */
55 #define UCR4  0x8c /* Control Register 4 */
56 #define UFCR  0x90 /* FIFO Control Register */
57 #define USR1  0x94 /* Status Register 1 */
58 #define USR2  0x98 /* Status Register 2 */
59 #define UESC  0x9c /* Escape Character Register */
60 #define UTIM  0xa0 /* Escape Timer Register */
61 #define UBIR  0xa4 /* BRM Incremental Register */
62 #define UBMR  0xa8 /* BRM Modulator Register */
63 #define UBRC  0xac /* Baud Rate Count Register */
64 #define IMX21_ONEMS 0xb0 /* One Millisecond register */
65 #define IMX1_UTS 0xd0 /* UART Test Register on i.MX1 */
66 #define IMX21_UTS 0xb4 /* UART Test Register on all other i.MX */
67
68 /* UART Control Register Bit Fields.*/
69 #define URXD_DUMMY_READ (1<<16)
70 #define URXD_CHARRDY    (1<<15)
71 #define URXD_ERR        (1<<14)
72 #define URXD_OVRRUN     (1<<13)
73 #define URXD_FRMERR     (1<<12)
74 #define URXD_BRK        (1<<11)
75 #define URXD_PRERR      (1<<10)
76 #define URXD_RX_DATA    (0xFF<<0)
77 #define UCR1_ADEN       (1<<15) /* Auto detect interrupt */
78 #define UCR1_ADBR       (1<<14) /* Auto detect baud rate */
79 #define UCR1_TRDYEN     (1<<13) /* Transmitter ready interrupt enable */
80 #define UCR1_IDEN       (1<<12) /* Idle condition interrupt */
81 #define UCR1_ICD_REG(x) (((x) & 3) << 10) /* idle condition detect */
82 #define UCR1_RRDYEN     (1<<9)  /* Recv ready interrupt enable */
83 #define UCR1_RDMAEN     (1<<8)  /* Recv ready DMA enable */
84 #define UCR1_IREN       (1<<7)  /* Infrared interface enable */
85 #define UCR1_TXMPTYEN   (1<<6)  /* Transmitter empty interrupt enable */
86 #define UCR1_RTSDEN     (1<<5)  /* RTS delta interrupt enable */
87 #define UCR1_SNDBRK     (1<<4)  /* Send break */
88 #define UCR1_TDMAEN     (1<<3)  /* Transmitter ready DMA enable */
89 #define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
90 #define UCR1_ATDMAEN    (1<<2)  /* Aging DMA Timer Enable */
91 #define UCR1_DOZE       (1<<1)  /* Doze */
92 #define UCR1_UARTEN     (1<<0)  /* UART enabled */
93 #define UCR2_ESCI       (1<<15) /* Escape seq interrupt enable */
94 #define UCR2_IRTS       (1<<14) /* Ignore RTS pin */
95 #define UCR2_CTSC       (1<<13) /* CTS pin control */
96 #define UCR2_CTS        (1<<12) /* Clear to send */
97 #define UCR2_ESCEN      (1<<11) /* Escape enable */
98 #define UCR2_PREN       (1<<8)  /* Parity enable */
99 #define UCR2_PROE       (1<<7)  /* Parity odd/even */
100 #define UCR2_STPB       (1<<6)  /* Stop */
101 #define UCR2_WS         (1<<5)  /* Word size */
102 #define UCR2_RTSEN      (1<<4)  /* Request to send interrupt enable */
103 #define UCR2_ATEN       (1<<3)  /* Aging Timer Enable */
104 #define UCR2_TXEN       (1<<2)  /* Transmitter enabled */
105 #define UCR2_RXEN       (1<<1)  /* Receiver enabled */
106 #define UCR2_SRST       (1<<0)  /* SW reset */
107 #define UCR3_DTREN      (1<<13) /* DTR interrupt enable */
108 #define UCR3_PARERREN   (1<<12) /* Parity error interrupt enable */
109 #define UCR3_FRAERREN   (1<<11) /* Frame error interrupt enable */
110 #define UCR3_DSR        (1<<10) /* Data set ready */
111 #define UCR3_DCD        (1<<9)  /* Data carrier detect */
112 #define UCR3_RI         (1<<8)  /* Ring indicator */
113 #define UCR3_ADNIMP     (1<<7)  /* Autobaud Detection Not Improved */
114 #define UCR3_RXDSEN     (1<<6)  /* Receive status interrupt enable */
115 #define UCR3_AIRINTEN   (1<<5)  /* Async IR wake interrupt enable */
116 #define UCR3_AWAKEN     (1<<4)  /* Async wake interrupt enable */
117 #define IMX21_UCR3_RXDMUXSEL    (1<<2)  /* RXD Muxed Input Select */
118 #define UCR3_INVT       (1<<1)  /* Inverted Infrared transmission */
119 #define UCR3_BPEN       (1<<0)  /* Preset registers enable */
120 #define UCR4_CTSTL_SHF  10      /* CTS trigger level shift */
121 #define UCR4_CTSTL_MASK 0x3F    /* CTS trigger is 6 bits wide */
122 #define UCR4_INVR       (1<<9)  /* Inverted infrared reception */
123 #define UCR4_ENIRI      (1<<8)  /* Serial infrared interrupt enable */
124 #define UCR4_WKEN       (1<<7)  /* Wake interrupt enable */
125 #define UCR4_REF16      (1<<6)  /* Ref freq 16 MHz */
126 #define UCR4_IDDMAEN    (1<<6)  /* DMA IDLE Condition Detected */
127 #define UCR4_IRSC       (1<<5)  /* IR special case */
128 #define UCR4_TCEN       (1<<3)  /* Transmit complete interrupt enable */
129 #define UCR4_BKEN       (1<<2)  /* Break condition interrupt enable */
130 #define UCR4_OREN       (1<<1)  /* Receiver overrun interrupt enable */
131 #define UCR4_DREN       (1<<0)  /* Recv data ready interrupt enable */
132 #define UFCR_RXTL_SHF   0       /* Receiver trigger level shift */
133 #define UFCR_DCEDTE     (1<<6)  /* DCE/DTE mode select */
134 #define UFCR_RFDIV      (7<<7)  /* Reference freq divider mask */
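/* Map a reference-clock divide-by x (1..7) onto the 3-bit RFDIV field: 1..6 encode as 6 - x, 7 encodes as 6 */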
135 #define UFCR_RFDIV_REG(x)       (((x) < 7 ? 6 - (x) : 6) << 7)
136 #define UFCR_TXTL_SHF   10      /* Transmitter trigger level shift */
137 #define USR1_PARITYERR  (1<<15) /* Parity error interrupt flag */
138 #define USR1_RTSS       (1<<14) /* RTS pin status */
139 #define USR1_TRDY       (1<<13) /* Transmitter ready interrupt/dma flag */
140 #define USR1_RTSD       (1<<12) /* RTS delta */
141 #define USR1_ESCF       (1<<11) /* Escape seq interrupt flag */
142 #define USR1_FRAMERR    (1<<10) /* Frame error interrupt flag */
143 #define USR1_RRDY       (1<<9)   /* Receiver ready interrupt/dma flag */
144 #define USR1_AGTIM      (1<<8)   /* Ageing timer interrupt flag */
145 #define USR1_TIMEOUT    (1<<7)   /* Receive timeout interrupt status */
146 #define USR1_RXDS        (1<<6)  /* Receiver idle interrupt flag */
147 #define USR1_AIRINT      (1<<5)  /* Async IR wake interrupt flag */
148 #define USR1_AWAKE       (1<<4)  /* Async wake interrupt flag */
149 #define USR2_ADET        (1<<15) /* Auto baud rate detect complete */
150 #define USR2_TXFE        (1<<14) /* Transmit buffer FIFO empty */
151 #define USR2_DTRF        (1<<13) /* DTR edge interrupt flag */
152 #define USR2_IDLE        (1<<12) /* Idle condition */
153 #define USR2_RIDELT      (1<<10) /* Ring Interrupt Delta */
154 #define USR2_RIIN        (1<<9)  /* Ring Indicator Input */
155 #define USR2_IRINT       (1<<8)  /* Serial infrared interrupt flag */
156 #define USR2_WAKE        (1<<7)  /* Wake */
157 #define USR2_DCDIN       (1<<5)  /* Data Carrier Detect Input */
158 #define USR2_RTSF        (1<<4)  /* RTS edge interrupt flag */
159 #define USR2_TXDC        (1<<3)  /* Transmitter complete */
160 #define USR2_BRCD        (1<<2)  /* Break condition */
161 #define USR2_ORE        (1<<1)   /* Overrun error */
162 #define USR2_RDR        (1<<0)   /* Recv data ready */
163 #define UTS_FRCPERR     (1<<13) /* Force parity error */
164 #define UTS_LOOP        (1<<12)  /* Loop tx and rx */
165 #define UTS_TXEMPTY      (1<<6)  /* TxFIFO empty */
166 #define UTS_RXEMPTY      (1<<5)  /* RxFIFO empty */
167 #define UTS_TXFULL       (1<<4)  /* TxFIFO full */
168 #define UTS_RXFULL       (1<<3)  /* RxFIFO full */
169 #define UTS_SOFTRST      (1<<0)  /* Software reset */
170
171 /* We've been assigned a range on the "Low-density serial ports" major */
172 #define SERIAL_IMX_MAJOR        207
173 #define MINOR_START             16
174 #define DEV_NAME                "ttymxc"
175
176 /*
177  * This determines how often we check the modem status signals
178  * for any change.  They generally aren't connected to an IRQ
179  * so we have to poll them.  We also check immediately before
180  * filling the TX FIFO in case CTS has been dropped.
181  */
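/* poll period: 250 ms expressed in jiffies */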
182 #define MCTRL_TIMEOUT   (250*HZ/1000)
183
184 #define DRIVER_NAME "IMX-uart"
185
186 #define UART_NR 8
187
188 /* The i.MX21-type UART runs on all i.MX SoCs except i.MX1 and i.MX6q */
189 enum imx_uart_type {
190         IMX1_UART,
191         IMX21_UART,
192         IMX6Q_UART,
193 };
194
195 /* device type dependent stuff */
196 struct imx_uart_data {
197         unsigned uts_reg;
198         enum imx_uart_type devtype;
199 };
200
201 struct imx_port {
202         struct uart_port        port;
203         struct timer_list       timer;
204         unsigned int            old_status;
205         unsigned int            have_rtscts:1;
206         unsigned int            dte_mode:1;
207         unsigned int            irda_inv_rx:1;
208         unsigned int            irda_inv_tx:1;
209         unsigned short          trcv_delay; /* transceiver delay */
210         struct clk              *clk_ipg;
211         struct clk              *clk_per;
212         const struct imx_uart_data *devdata;
213
214         struct mctrl_gpios *gpios;
215
216         /* DMA fields */
217         unsigned int            dma_is_inited:1;
218         unsigned int            dma_is_enabled:1;
219         unsigned int            dma_is_rxing:1;
220         unsigned int            dma_is_txing:1;
221         struct dma_chan         *dma_chan_rx, *dma_chan_tx;
222         struct scatterlist      rx_sgl, tx_sgl[2];
223         void                    *rx_buf;
224         unsigned int            tx_bytes;
225         unsigned int            dma_tx_nents;
226         wait_queue_head_t       dma_wait;
227         unsigned int            saved_reg[10];
228         bool                    context_saved;
229 };
230
231 struct imx_port_ucrs {
232         unsigned int    ucr1;
233         unsigned int    ucr2;
234         unsigned int    ucr3;
235 };
236
237 static struct imx_uart_data imx_uart_devdata[] = {
238         [IMX1_UART] = {
239                 .uts_reg = IMX1_UTS,
240                 .devtype = IMX1_UART,
241         },
242         [IMX21_UART] = {
243                 .uts_reg = IMX21_UTS,
244                 .devtype = IMX21_UART,
245         },
246         [IMX6Q_UART] = {
247                 .uts_reg = IMX21_UTS,
248                 .devtype = IMX6Q_UART,
249         },
250 };
251
252 static const struct platform_device_id imx_uart_devtype[] = {
253         {
254                 .name = "imx1-uart",
255                 .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX1_UART],
256         }, {
257                 .name = "imx21-uart",
258                 .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX21_UART],
259         }, {
260                 .name = "imx6q-uart",
261                 .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX6Q_UART],
262         }, {
263                 /* sentinel */
264         }
265 };
266 MODULE_DEVICE_TABLE(platform, imx_uart_devtype);
267
268 static const struct of_device_id imx_uart_dt_ids[] = {
269         { .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], },
270         { .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
271         { .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
272         { /* sentinel */ }
273 };
274 MODULE_DEVICE_TABLE(of, imx_uart_dt_ids);
275
276 static inline unsigned uts_reg(struct imx_port *sport)
277 {
278         return sport->devdata->uts_reg;
279 }
280
281 static inline int is_imx1_uart(struct imx_port *sport)
282 {
283         return sport->devdata->devtype == IMX1_UART;
284 }
285
286 static inline int is_imx21_uart(struct imx_port *sport)
287 {
288         return sport->devdata->devtype == IMX21_UART;
289 }
290
291 static inline int is_imx6q_uart(struct imx_port *sport)
292 {
293         return sport->devdata->devtype == IMX6Q_UART;
294 }
295 /*
296  * Save and restore functions for UCR1, UCR2 and UCR3 registers
297  */
298 #if defined(CONFIG_SERIAL_IMX_CONSOLE)
299 static void imx_port_ucrs_save(struct uart_port *port,
300                                struct imx_port_ucrs *ucr)
301 {
302         /* save control registers */
303         ucr->ucr1 = readl(port->membase + UCR1);
304         ucr->ucr2 = readl(port->membase + UCR2);
305         ucr->ucr3 = readl(port->membase + UCR3);
306 }
307
308 static void imx_port_ucrs_restore(struct uart_port *port,
309                                   struct imx_port_ucrs *ucr)
310 {
311         /* restore control registers */
312         writel(ucr->ucr1, port->membase + UCR1);
313         writel(ucr->ucr2, port->membase + UCR2);
314         writel(ucr->ucr3, port->membase + UCR3);
315 }
316 #endif
317
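/*
 * Helpers that prepare a UCR2 value for the desired RTS behaviour and update
 * any RTS GPIO: "active"/"inactive" drive the pin manually via UCR2_CTS (the
 * register bit is named for the DCE side, see the note above imx_get_hwmctrl),
 * while "auto" lets the receiver control the pin via UCR2_CTSC. The caller
 * writes the result back to UCR2.
 */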
318 static void imx_port_rts_active(struct imx_port *sport, unsigned long *ucr2)
319 {
320         *ucr2 &= ~UCR2_CTSC;
321         *ucr2 |= UCR2_CTS;
322
323         mctrl_gpio_set(sport->gpios, sport->port.mctrl | TIOCM_RTS);
324 }
325
326 static void imx_port_rts_inactive(struct imx_port *sport, unsigned long *ucr2)
327 {
328         *ucr2 &= ~(UCR2_CTSC | UCR2_CTS);
329
330         mctrl_gpio_set(sport->gpios, sport->port.mctrl & ~TIOCM_RTS);
331 }
332
333 static void imx_port_rts_auto(struct imx_port *sport, unsigned long *ucr2)
334 {
335         *ucr2 |= UCR2_CTSC;
336 }
337
338 /*
339  * interrupts disabled on entry
340  */
341 static void imx_stop_tx(struct uart_port *port)
342 {
343         struct imx_port *sport = (struct imx_port *)port;
344         unsigned long temp;
345
346         /*
347          * A DMA TX transfer may still be running, possibly on another CPU;
348          * if so, leave it alone and let its completion callback finish the job.
349          */
350         if (sport->dma_is_enabled && sport->dma_is_txing)
351                 return;
352
353         temp = readl(port->membase + UCR1);
354         writel(temp & ~UCR1_TXMPTYEN, port->membase + UCR1);
355
356         /* in rs485 mode disable transmitter if shifter is empty */
357         if (port->rs485.flags & SER_RS485_ENABLED &&
358             readl(port->membase + USR2) & USR2_TXDC) {
359                 temp = readl(port->membase + UCR2);
360                 if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
361                         imx_port_rts_inactive(sport, &temp);
362                 else
363                         imx_port_rts_active(sport, &temp);
364                 writel(temp, port->membase + UCR2);
365
366                 temp = readl(port->membase + UCR4);
367                 temp &= ~UCR4_TCEN;
368                 writel(temp, port->membase + UCR4);
369         }
370 }
371
372 /*
373  * interrupts disabled on entry
374  */
375 static void imx_stop_rx(struct uart_port *port)
376 {
377         struct imx_port *sport = (struct imx_port *)port;
378         unsigned long temp;
379
380         if (sport->dma_is_enabled && sport->dma_is_rxing) {
381                 if (sport->port.suspended) {
382                         dmaengine_terminate_all(sport->dma_chan_rx);
383                         sport->dma_is_rxing = 0;
384                 } else {
385                         return;
386                 }
387         }
388
389         temp = readl(sport->port.membase + UCR2);
390         writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2);
391
392         /* disable the `Receiver Ready Interrupt` */
393         temp = readl(sport->port.membase + UCR1);
394         writel(temp & ~UCR1_RRDYEN, sport->port.membase + UCR1);
395 }
396
397 /*
398  * Set the modem control timer to fire immediately.
399  */
400 static void imx_enable_ms(struct uart_port *port)
401 {
402         struct imx_port *sport = (struct imx_port *)port;
403
404         mod_timer(&sport->timer, jiffies);
405
406         mctrl_gpio_enable_ms(sport->gpios);
407 }
408
409 static void imx_dma_tx(struct imx_port *sport);
410 static inline void imx_transmit_buffer(struct imx_port *sport)
411 {
412         struct circ_buf *xmit = &sport->port.state->xmit;
413         unsigned long temp;
414
415         if (sport->port.x_char) {
416                 /* Send next char */
417                 writel(sport->port.x_char, sport->port.membase + URTX0);
418                 sport->port.icount.tx++;
419                 sport->port.x_char = 0;
420                 return;
421         }
422
423         if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
424                 imx_stop_tx(&sport->port);
425                 return;
426         }
427
428         if (sport->dma_is_enabled) {
429                 /*
430                  * We've just sent an X-char. Ensure the TX DMA is enabled
431                  * and the TX IRQ is disabled.
432                  */
433                 temp = readl(sport->port.membase + UCR1);
434                 temp &= ~UCR1_TXMPTYEN;
435                 if (sport->dma_is_txing) {
436                         temp |= UCR1_TDMAEN;
437                         writel(temp, sport->port.membase + UCR1);
438                 } else {
439                         writel(temp, sport->port.membase + UCR1);
440                         imx_dma_tx(sport);
441                 }
442         }
443
444         while (!uart_circ_empty(xmit) &&
445                !(readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)) {
446                 /* send xmit->buf[xmit->tail]
447                  * out the port here */
448                 writel(xmit->buf[xmit->tail], sport->port.membase + URTX0);
449                 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
450                 sport->port.icount.tx++;
451         }
452
453         if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
454                 uart_write_wakeup(&sport->port);
455
456         if (uart_circ_empty(xmit))
457                 imx_stop_tx(&sport->port);
458 }
459
460 static void dma_tx_callback(void *data)
461 {
462         struct imx_port *sport = data;
463         struct scatterlist *sgl = &sport->tx_sgl[0];
464         struct circ_buf *xmit = &sport->port.state->xmit;
465         unsigned long flags;
466         unsigned long temp;
467
468         spin_lock_irqsave(&sport->port.lock, flags);
469
470         dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
471
472         temp = readl(sport->port.membase + UCR1);
473         temp &= ~UCR1_TDMAEN;
474         writel(temp, sport->port.membase + UCR1);
475
476         /* update the statistics */
477         xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
478         sport->port.icount.tx += sport->tx_bytes;
479
480         dev_dbg(sport->port.dev, "we finish the TX DMA.\n");
481
482         sport->dma_is_txing = 0;
483
484         spin_unlock_irqrestore(&sport->port.lock, flags);
485
486         if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
487                 uart_write_wakeup(&sport->port);
488
489         if (waitqueue_active(&sport->dma_wait)) {
490                 wake_up(&sport->dma_wait);
491                 dev_dbg(sport->port.dev, "exit in %s.\n", __func__);
492                 return;
493         }
494
495         spin_lock_irqsave(&sport->port.lock, flags);
496         if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
497                 imx_dma_tx(sport);
498         spin_unlock_irqrestore(&sport->port.lock, flags);
499 }
500
501 static void imx_dma_tx(struct imx_port *sport)
502 {
503         struct circ_buf *xmit = &sport->port.state->xmit;
504         struct scatterlist *sgl = sport->tx_sgl;
505         struct dma_async_tx_descriptor *desc;
506         struct dma_chan *chan = sport->dma_chan_tx;
507         struct device *dev = sport->port.dev;
508         unsigned long temp;
509         int ret;
510
511         if (sport->dma_is_txing)
512                 return;
513
514         sport->tx_bytes = uart_circ_chars_pending(xmit);
515
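        /*
         * The circ buffer may wrap: one sg entry is enough when the pending
         * data is contiguous (including head == 0), otherwise split it in two.
         */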
516         if (xmit->tail < xmit->head || xmit->head == 0) {
517                 sport->dma_tx_nents = 1;
518                 sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
519         } else {
520                 sport->dma_tx_nents = 2;
521                 sg_init_table(sgl, 2);
522                 sg_set_buf(sgl, xmit->buf + xmit->tail,
523                                 UART_XMIT_SIZE - xmit->tail);
524                 sg_set_buf(sgl + 1, xmit->buf, xmit->head);
525         }
526
527         ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
528         if (ret == 0) {
529                 dev_err(dev, "DMA mapping error for TX.\n");
530                 return;
531         }
532         desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
533                                         DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
534         if (!desc) {
535                 dma_unmap_sg(dev, sgl, sport->dma_tx_nents,
536                              DMA_TO_DEVICE);
537                 dev_err(dev, "We cannot prepare for the TX slave dma!\n");
538                 return;
539         }
540         desc->callback = dma_tx_callback;
541         desc->callback_param = sport;
542
543         dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
544                         uart_circ_chars_pending(xmit));
545
546         temp = readl(sport->port.membase + UCR1);
547         temp |= UCR1_TDMAEN;
548         writel(temp, sport->port.membase + UCR1);
549
550         /* fire it */
551         sport->dma_is_txing = 1;
552         dmaengine_submit(desc);
553         dma_async_issue_pending(chan);
554         return;
555 }
556
557 /*
558  * interrupts disabled on entry
559  */
560 static void imx_start_tx(struct uart_port *port)
561 {
562         struct imx_port *sport = (struct imx_port *)port;
563         unsigned long temp;
564
565         if (port->rs485.flags & SER_RS485_ENABLED) {
566                 temp = readl(port->membase + UCR2);
567                 if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
568                         imx_port_rts_inactive(sport, &temp);
569                 else
570                         imx_port_rts_active(sport, &temp);
571                 writel(temp, port->membase + UCR2);
572
573                 /* enable transmitter and shifter empty irq */
574                 temp = readl(port->membase + UCR4);
575                 temp |= UCR4_TCEN;
576                 writel(temp, port->membase + UCR4);
577         }
578
579         if (!sport->dma_is_enabled) {
580                 temp = readl(sport->port.membase + UCR1);
581                 writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);
582         }
583
584         if (sport->dma_is_enabled) {
585                 if (sport->port.x_char) {
586                         /* We have an X-char to send, so enable the TX IRQ and
587                          * disable TX DMA so the TX interrupt can send it */
588                         temp = readl(sport->port.membase + UCR1);
589                         temp &= ~UCR1_TDMAEN;
590                         temp |= UCR1_TXMPTYEN;
591                         writel(temp, sport->port.membase + UCR1);
592                         return;
593                 }
594
595                 if (!uart_circ_empty(&port->state->xmit) &&
596                     !uart_tx_stopped(port))
597                         imx_dma_tx(sport);
598                 return;
599         }
600 }
601
602 static irqreturn_t imx_rtsint(int irq, void *dev_id)
603 {
604         struct imx_port *sport = dev_id;
605         unsigned int val;
606         unsigned long flags;
607
608         spin_lock_irqsave(&sport->port.lock, flags);
609
610         writel(USR1_RTSD, sport->port.membase + USR1);
611         val = readl(sport->port.membase + USR1) & USR1_RTSS;
612         uart_handle_cts_change(&sport->port, !!val);
613         wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
614
615         spin_unlock_irqrestore(&sport->port.lock, flags);
616         return IRQ_HANDLED;
617 }
618
619 static irqreturn_t imx_txint(int irq, void *dev_id)
620 {
621         struct imx_port *sport = dev_id;
622         unsigned long flags;
623
624         spin_lock_irqsave(&sport->port.lock, flags);
625         imx_transmit_buffer(sport);
626         spin_unlock_irqrestore(&sport->port.lock, flags);
627         return IRQ_HANDLED;
628 }
629
630 static irqreturn_t imx_rxint(int irq, void *dev_id)
631 {
632         struct imx_port *sport = dev_id;
633         unsigned int rx, flg, ignored = 0;
634         struct tty_port *port = &sport->port.state->port;
635         unsigned long flags, temp;
636
637         spin_lock_irqsave(&sport->port.lock, flags);
638
639         while (readl(sport->port.membase + USR2) & USR2_RDR) {
640                 flg = TTY_NORMAL;
641                 sport->port.icount.rx++;
642
643                 rx = readl(sport->port.membase + URXD0);
644
645                 temp = readl(sport->port.membase + USR2);
646                 if (temp & USR2_BRCD) {
647                         writel(USR2_BRCD, sport->port.membase + USR2);
648                         if (uart_handle_break(&sport->port))
649                                 continue;
650                 }
651
652                 if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
653                         continue;
654
655                 if (unlikely(rx & URXD_ERR)) {
656                         if (rx & URXD_BRK)
657                                 sport->port.icount.brk++;
658                         else if (rx & URXD_PRERR)
659                                 sport->port.icount.parity++;
660                         else if (rx & URXD_FRMERR)
661                                 sport->port.icount.frame++;
662                         if (rx & URXD_OVRRUN)
663                                 sport->port.icount.overrun++;
664
665                         if (rx & sport->port.ignore_status_mask) {
666                                 if (++ignored > 100)
667                                         goto out;
668                                 continue;
669                         }
670
671                         rx &= (sport->port.read_status_mask | 0xFF);
672
673                         if (rx & URXD_BRK)
674                                 flg = TTY_BREAK;
675                         else if (rx & URXD_PRERR)
676                                 flg = TTY_PARITY;
677                         else if (rx & URXD_FRMERR)
678                                 flg = TTY_FRAME;
679                         if (rx & URXD_OVRRUN)
680                                 flg = TTY_OVERRUN;
681
682 #ifdef SUPPORT_SYSRQ
683                         sport->port.sysrq = 0;
684 #endif
685                 }
686
687                 if (sport->port.ignore_status_mask & URXD_DUMMY_READ)
688                         goto out;
689
690                 if (tty_insert_flip_char(port, rx, flg) == 0)
691                         sport->port.icount.buf_overrun++;
692         }
693
694 out:
695         spin_unlock_irqrestore(&sport->port.lock, flags);
696         tty_flip_buffer_push(port);
697         return IRQ_HANDLED;
698 }
699
700 static int start_rx_dma(struct imx_port *sport);
701 /*
702  * If the RXFIFO already holds some data, start a DMA
703  * operation to receive it.
704  */
705 static void imx_dma_rxint(struct imx_port *sport)
706 {
707         unsigned long temp;
708         unsigned long flags;
709
710         spin_lock_irqsave(&sport->port.lock, flags);
711
712         temp = readl(sport->port.membase + USR2);
713         if ((temp & USR2_RDR) && !sport->dma_is_rxing) {
714                 sport->dma_is_rxing = 1;
715
716                 /* disable the receiver ready and aging timer interrupts */
717                 temp = readl(sport->port.membase + UCR1);
718                 temp &= ~(UCR1_RRDYEN);
719                 writel(temp, sport->port.membase + UCR1);
720
721                 temp = readl(sport->port.membase + UCR2);
722                 temp &= ~(UCR2_ATEN);
723                 writel(temp, sport->port.membase + UCR2);
724
725                 /* tell the DMA to receive the data. */
726                 start_rx_dma(sport);
727         }
728
729         spin_unlock_irqrestore(&sport->port.lock, flags);
730 }
731
732 static irqreturn_t imx_int(int irq, void *dev_id)
733 {
734         struct imx_port *sport = dev_id;
735         unsigned int sts;
736         unsigned int sts2;
737
738         sts = readl(sport->port.membase + USR1);
739         sts2 = readl(sport->port.membase + USR2);
740
741         if (sts & (USR1_RRDY | USR1_AGTIM)) {
742                 if (sport->dma_is_enabled)
743                         imx_dma_rxint(sport);
744                 else
745                         imx_rxint(irq, dev_id);
746         }
747
748         if ((sts & USR1_TRDY &&
749              readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN) ||
750             (sts2 & USR2_TXDC &&
751              readl(sport->port.membase + UCR4) & UCR4_TCEN))
752                 imx_txint(irq, dev_id);
753
754         if (sts & USR1_RTSD)
755                 imx_rtsint(irq, dev_id);
756
757         if (sts & USR1_AWAKE)
758                 writel(USR1_AWAKE, sport->port.membase + USR1);
759
760         if (sts2 & USR2_ORE) {
761                 sport->port.icount.overrun++;
762                 writel(USR2_ORE, sport->port.membase + USR2);
763         }
764
765         return IRQ_HANDLED;
766 }
767
768 /*
769  * Return TIOCSER_TEMT when transmitter is not busy.
770  */
771 static unsigned int imx_tx_empty(struct uart_port *port)
772 {
773         struct imx_port *sport = (struct imx_port *)port;
774         unsigned int ret;
775
776         ret = (readl(sport->port.membase + USR2) & USR2_TXDC) ?  TIOCSER_TEMT : 0;
777
778         /* If the TX DMA is working, return 0. */
779         if (sport->dma_is_enabled && sport->dma_is_txing)
780                 ret = 0;
781
782         return ret;
783 }
784
785 /*
786  * We have a modem side uart, so the meanings of RTS and CTS are inverted.
787  */
788 static unsigned int imx_get_hwmctrl(struct imx_port *sport)
789 {
790         unsigned int tmp = TIOCM_DSR;
791         unsigned usr1 = readl(sport->port.membase + USR1);
792
793         if (usr1 & USR1_RTSS)
794                 tmp |= TIOCM_CTS;
795
796         /* in DCE mode DCDIN is always 0 */
797         if (!(readl(sport->port.membase + USR2) & USR2_DCDIN))
798                 tmp |= TIOCM_CAR;
799
800         /* in DCE mode RIIN is always 0 */
801         if (readl(sport->port.membase + USR2) & USR2_RIIN)
802                 tmp |= TIOCM_RI;
803
804         return tmp;
805 }
806
807 static unsigned int imx_get_mctrl(struct uart_port *port)
808 {
809         struct imx_port *sport = (struct imx_port *)port;
810         unsigned int ret = imx_get_hwmctrl(sport);
811
812         mctrl_gpio_get(sport->gpios, &ret);
813
814         return ret;
815 }
816
817 static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl)
818 {
819         struct imx_port *sport = (struct imx_port *)port;
820         unsigned long temp;
821
822         if (!(port->rs485.flags & SER_RS485_ENABLED)) {
823                 temp = readl(sport->port.membase + UCR2);
824                 temp &= ~(UCR2_CTS | UCR2_CTSC);
825                 if (mctrl & TIOCM_RTS)
826                         temp |= UCR2_CTS | UCR2_CTSC;
827                 writel(temp, sport->port.membase + UCR2);
828         }
829
830         temp = readl(sport->port.membase + UCR3) & ~UCR3_DSR;
831         if (!(mctrl & TIOCM_DTR))
832                 temp |= UCR3_DSR;
833         writel(temp, sport->port.membase + UCR3);
834
835         temp = readl(sport->port.membase + uts_reg(sport)) & ~UTS_LOOP;
836         if (mctrl & TIOCM_LOOP)
837                 temp |= UTS_LOOP;
838         writel(temp, sport->port.membase + uts_reg(sport));
839
840         mctrl_gpio_set(sport->gpios, mctrl);
841 }
842
843 /*
844  * Interrupts always disabled.
845  */
846 static void imx_break_ctl(struct uart_port *port, int break_state)
847 {
848         struct imx_port *sport = (struct imx_port *)port;
849         unsigned long flags, temp;
850
851         spin_lock_irqsave(&sport->port.lock, flags);
852
853         temp = readl(sport->port.membase + UCR1) & ~UCR1_SNDBRK;
854
855         if (break_state != 0)
856                 temp |= UCR1_SNDBRK;
857
858         writel(temp, sport->port.membase + UCR1);
859
860         spin_unlock_irqrestore(&sport->port.lock, flags);
861 }
862
863 /*
864  * Handle any change of modem status signal since we were last called.
865  */
866 static void imx_mctrl_check(struct imx_port *sport)
867 {
868         unsigned int status, changed;
869
870         status = imx_get_hwmctrl(sport);
871         changed = status ^ sport->old_status;
872
873         if (changed == 0)
874                 return;
875
876         sport->old_status = status;
877
878         if (changed & TIOCM_RI)
879                 sport->port.icount.rng++;
880         if (changed & TIOCM_DSR)
881                 sport->port.icount.dsr++;
882         if (changed & TIOCM_CAR)
883                 uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
884         if (changed & TIOCM_CTS)
885                 uart_handle_cts_change(&sport->port, status & TIOCM_CTS);
886
887         wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
888 }
889
890 /*
891  * This is our per-port timeout handler, for checking the
892  * modem status signals.
893  */
894 static void imx_timeout(unsigned long data)
895 {
896         struct imx_port *sport = (struct imx_port *)data;
897         unsigned long flags;
898
899         if (sport->port.state) {
900                 spin_lock_irqsave(&sport->port.lock, flags);
901                 imx_mctrl_check(sport);
902                 spin_unlock_irqrestore(&sport->port.lock, flags);
903
904                 mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
905         }
906 }
907
908 #define RX_BUF_SIZE     (PAGE_SIZE)
909 static void imx_rx_dma_done(struct imx_port *sport)
910 {
911         unsigned long temp;
912         unsigned long flags;
913
914         spin_lock_irqsave(&sport->port.lock, flags);
915
916         /* re-enable interrupts to get notified when new symbols are incoming */
917         temp = readl(sport->port.membase + UCR1);
918         temp |= UCR1_RRDYEN;
919         writel(temp, sport->port.membase + UCR1);
920
921         temp = readl(sport->port.membase + UCR2);
922         temp |= UCR2_ATEN;
923         writel(temp, sport->port.membase + UCR2);
924
925         sport->dma_is_rxing = 0;
926
927         /* Is the shutdown waiting for us? */
928         if (waitqueue_active(&sport->dma_wait))
929                 wake_up(&sport->dma_wait);
930
931         spin_unlock_irqrestore(&sport->port.lock, flags);
932 }
933
934 /*
935  * There are two kinds of RX DMA interrupts (such as on the i.MX6Q):
936  *   [1] the RX DMA buffer is full.
937  *   [2] the aging timer expires.
938  *
939  * Condition [2] is triggered when a character has been sitting in the FIFO
940  * for at least 8 byte durations.
941  */
942 static void dma_rx_callback(void *data)
943 {
944         struct imx_port *sport = data;
945         struct dma_chan *chan = sport->dma_chan_rx;
946         struct scatterlist *sgl = &sport->rx_sgl;
947         struct tty_port *port = &sport->port.state->port;
948         struct dma_tx_state state;
949         enum dma_status status;
950         unsigned int count;
951
952         /* unmap it first */
953         dma_unmap_sg(sport->port.dev, sgl, 1, DMA_FROM_DEVICE);
954
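        /*
         * The dmaengine residue is the part of the RX buffer left unused by
         * this transfer; everything before it holds freshly received data.
         */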
955         status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
956         count = RX_BUF_SIZE - state.residue;
957
958         dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
959
960         if (count) {
961                 if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
962                         int bytes = tty_insert_flip_string(port, sport->rx_buf,
963                                         count);
964
965                         if (bytes != count)
966                                 sport->port.icount.buf_overrun++;
967                 }
968                 tty_flip_buffer_push(port);
969                 sport->port.icount.rx += count;
970         }
971
972         /*
973          * Restart RX DMA directly if more data is available in order to skip
974          * the roundtrip through the IRQ handler. If there is some data already
975          * in the FIFO, DMA needs to be restarted soon anyways.
976          *
977          * Otherwise stop the DMA and reactivate FIFO IRQs to restart DMA once
978          * data starts to arrive again.
979          */
980         if (readl(sport->port.membase + USR2) & USR2_RDR)
981                 start_rx_dma(sport);
982         else
983                 imx_rx_dma_done(sport);
984 }
985
986 static int start_rx_dma(struct imx_port *sport)
987 {
988         struct scatterlist *sgl = &sport->rx_sgl;
989         struct dma_chan *chan = sport->dma_chan_rx;
990         struct device *dev = sport->port.dev;
991         struct dma_async_tx_descriptor *desc;
992         int ret;
993
994         sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
995         ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
996         if (ret == 0) {
997                 dev_err(dev, "DMA mapping error for RX.\n");
998                 return -EINVAL;
999         }
1000         desc = dmaengine_prep_slave_sg(chan, sgl, 1, DMA_DEV_TO_MEM,
1001                                         DMA_PREP_INTERRUPT);
1002         if (!desc) {
1003                 dma_unmap_sg(dev, sgl, 1, DMA_FROM_DEVICE);
1004                 dev_err(dev, "We cannot prepare for the RX slave dma!\n");
1005                 return -EINVAL;
1006         }
1007         desc->callback = dma_rx_callback;
1008         desc->callback_param = sport;
1009
1010         dev_dbg(dev, "RX: prepare for the DMA.\n");
1011         dmaengine_submit(desc);
1012         dma_async_issue_pending(chan);
1013         return 0;
1014 }
1015
1016 #define TXTL_DEFAULT 2 /* reset default */
1017 #define RXTL_DEFAULT 1 /* reset default */
1018 #define TXTL_DMA 8 /* DMA burst setting */
1019 #define RXTL_DMA 9 /* DMA burst setting */
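/* the RX trigger level is one above the RX DMA burst so the aging timer can still fire */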
1020
1021 static void imx_setup_ufcr(struct imx_port *sport,
1022                           unsigned char txwl, unsigned char rxwl)
1023 {
1024         unsigned int val;
1025
1026         /* set receiver / transmitter trigger level */
1027         val = readl(sport->port.membase + UFCR) & (UFCR_RFDIV | UFCR_DCEDTE);
1028         val |= txwl << UFCR_TXTL_SHF | rxwl;
1029         writel(val, sport->port.membase + UFCR);
1030 }
1031
1032 static void imx_uart_dma_exit(struct imx_port *sport)
1033 {
1034         if (sport->dma_chan_rx) {
1035                 dma_release_channel(sport->dma_chan_rx);
1036                 sport->dma_chan_rx = NULL;
1037
1038                 kfree(sport->rx_buf);
1039                 sport->rx_buf = NULL;
1040         }
1041
1042         if (sport->dma_chan_tx) {
1043                 dma_release_channel(sport->dma_chan_tx);
1044                 sport->dma_chan_tx = NULL;
1045         }
1046
1047         sport->dma_is_inited = 0;
1048 }
1049
1050 static int imx_uart_dma_init(struct imx_port *sport)
1051 {
1052         struct dma_slave_config slave_config = {};
1053         struct device *dev = sport->port.dev;
1054         int ret;
1055
1056         /* Prepare for RX: */
1057         sport->dma_chan_rx = dma_request_slave_channel(dev, "rx");
1058         if (!sport->dma_chan_rx) {
1059                 dev_dbg(dev, "cannot get the DMA channel.\n");
1060                 ret = -EINVAL;
1061                 goto err;
1062         }
1063
1064         slave_config.direction = DMA_DEV_TO_MEM;
1065         slave_config.src_addr = sport->port.mapbase + URXD0;
1066         slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1067         /* one byte less than the watermark level to enable the aging timer */
1068         slave_config.src_maxburst = RXTL_DMA - 1;
1069         ret = dmaengine_slave_config(sport->dma_chan_rx, &slave_config);
1070         if (ret) {
1071                 dev_err(dev, "error in RX dma configuration.\n");
1072                 goto err;
1073         }
1074
1075         sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1076         if (!sport->rx_buf) {
1077                 ret = -ENOMEM;
1078                 goto err;
1079         }
1080
1081         /* Prepare for TX: */
1082         sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
1083         if (!sport->dma_chan_tx) {
1084                 dev_err(dev, "cannot get the TX DMA channel!\n");
1085                 ret = -EINVAL;
1086                 goto err;
1087         }
1088
1089         slave_config.direction = DMA_MEM_TO_DEV;
1090         slave_config.dst_addr = sport->port.mapbase + URTX0;
1091         slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1092         slave_config.dst_maxburst = TXTL_DMA;
1093         ret = dmaengine_slave_config(sport->dma_chan_tx, &slave_config);
1094         if (ret) {
1095                 dev_err(dev, "error in TX dma configuration.\n");
1096                 goto err;
1097         }
1098
1099         sport->dma_is_inited = 1;
1100
1101         return 0;
1102 err:
1103         imx_uart_dma_exit(sport);
1104         return ret;
1105 }
1106
1107 static void imx_enable_dma(struct imx_port *sport)
1108 {
1109         unsigned long temp;
1110
1111         init_waitqueue_head(&sport->dma_wait);
1112
1113         /* route RX-ready, TX-ready and aging-timer events to DMA requests */
1114         temp = readl(sport->port.membase + UCR1);
1115         temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN;
1116         writel(temp, sport->port.membase + UCR1);
1117
1118         temp = readl(sport->port.membase + UCR2);
1119         temp |= UCR2_ATEN;
1120         writel(temp, sport->port.membase + UCR2);
1121
1122         imx_setup_ufcr(sport, TXTL_DMA, RXTL_DMA);
1123
1124         sport->dma_is_enabled = 1;
1125 }
1126
1127 static void imx_disable_dma(struct imx_port *sport)
1128 {
1129         unsigned long temp;
1130
1131         /* clear UCR1 */
1132         temp = readl(sport->port.membase + UCR1);
1133         temp &= ~(UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN);
1134         writel(temp, sport->port.membase + UCR1);
1135
1136         /* clear UCR2 */
1137         temp = readl(sport->port.membase + UCR2);
1138         temp &= ~(UCR2_CTSC | UCR2_CTS | UCR2_ATEN);
1139         writel(temp, sport->port.membase + UCR2);
1140
1141         imx_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
1142
1143         sport->dma_is_enabled = 0;
1144 }
1145
1146 /* half the RX buffer size */
1147 #define CTSTL 16
1148
1149 static int imx_startup(struct uart_port *port)
1150 {
1151         struct imx_port *sport = (struct imx_port *)port;
1152         int retval, i;
1153         unsigned long flags, temp;
1154
1155         retval = clk_prepare_enable(sport->clk_per);
1156         if (retval)
1157                 return retval;
1158         retval = clk_prepare_enable(sport->clk_ipg);
1159         if (retval) {
1160                 clk_disable_unprepare(sport->clk_per);
1161                 return retval;
1162         }
1163
1164         imx_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
1165
1166         /* disable the DREN bit (Data Ready interrupt enable) before
1167          * requesting IRQs
1168          */
1169         temp = readl(sport->port.membase + UCR4);
1170
1171         /* set the trigger level for CTS */
1172         temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
1173         temp |= CTSTL << UCR4_CTSTL_SHF;
1174
1175         writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
1176
1177         /* Can we enable the DMA support? */
1178         if (is_imx6q_uart(sport) && !uart_console(port) &&
1179             !sport->dma_is_inited)
1180                 imx_uart_dma_init(sport);
1181
1182         spin_lock_irqsave(&sport->port.lock, flags);
1183         /* Reset FIFOs and state machines */
1184         i = 100;
1185
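        /*
         * UCR2_SRST is active low: writing 0 requests a software reset and the
         * bit reads back as 1 again once the reset has completed, so poll it
         * for up to ~100us.
         */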
1186         temp = readl(sport->port.membase + UCR2);
1187         temp &= ~UCR2_SRST;
1188         writel(temp, sport->port.membase + UCR2);
1189
1190         while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0))
1191                 udelay(1);
1192
1193         /*
1194          * Finally, clear and enable interrupts
1195          */
1196         writel(USR1_RTSD, sport->port.membase + USR1);
1197         writel(USR2_ORE, sport->port.membase + USR2);
1198
1199         if (sport->dma_is_inited && !sport->dma_is_enabled)
1200                 imx_enable_dma(sport);
1201
1202         temp = readl(sport->port.membase + UCR1);
1203         temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
1204
1205         writel(temp, sport->port.membase + UCR1);
1206
1207         temp = readl(sport->port.membase + UCR4);
1208         temp |= UCR4_OREN;
1209         writel(temp, sport->port.membase + UCR4);
1210
1211         temp = readl(sport->port.membase + UCR2);
1212         temp |= (UCR2_RXEN | UCR2_TXEN);
1213         if (!sport->have_rtscts)
1214                 temp |= UCR2_IRTS;
1215         writel(temp, sport->port.membase + UCR2);
1216
1217         if (!is_imx1_uart(sport)) {
1218                 temp = readl(sport->port.membase + UCR3);
1219                 temp |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
1220                 writel(temp, sport->port.membase + UCR3);
1221         }
1222
1223         /*
1224          * Enable modem status interrupts
1225          */
1226         imx_enable_ms(&sport->port);
1227         spin_unlock_irqrestore(&sport->port.lock, flags);
1228
1229         return 0;
1230 }
1231
1232 static void imx_shutdown(struct uart_port *port)
1233 {
1234         struct imx_port *sport = (struct imx_port *)port;
1235         unsigned long temp;
1236         unsigned long flags;
1237
1238         if (sport->dma_is_enabled) {
1239                 int ret;
1240
1241                 /* We have to wait for the DMA to finish. */
1242                 ret = wait_event_interruptible(sport->dma_wait,
1243                         !sport->dma_is_rxing && !sport->dma_is_txing);
1244                 if (ret != 0) {
1245                         sport->dma_is_rxing = 0;
1246                         sport->dma_is_txing = 0;
1247                         dmaengine_terminate_all(sport->dma_chan_tx);
1248                         dmaengine_terminate_all(sport->dma_chan_rx);
1249                 }
1250                 spin_lock_irqsave(&sport->port.lock, flags);
1251                 imx_stop_tx(port);
1252                 imx_stop_rx(port);
1253                 imx_disable_dma(sport);
1254                 spin_unlock_irqrestore(&sport->port.lock, flags);
1255                 imx_uart_dma_exit(sport);
1256         }
1257
1258         mctrl_gpio_disable_ms(sport->gpios);
1259
1260         spin_lock_irqsave(&sport->port.lock, flags);
1261         temp = readl(sport->port.membase + UCR2);
1262         temp &= ~(UCR2_TXEN);
1263         writel(temp, sport->port.membase + UCR2);
1264         spin_unlock_irqrestore(&sport->port.lock, flags);
1265
1266         /*
1267          * Stop our timer.
1268          */
1269         del_timer_sync(&sport->timer);
1270
1271         /*
1272          * Disable all interrupts, port and break condition.
1273          */
1274
1275         spin_lock_irqsave(&sport->port.lock, flags);
1276         temp = readl(sport->port.membase + UCR1);
1277         temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
1278
1279         writel(temp, sport->port.membase + UCR1);
1280         spin_unlock_irqrestore(&sport->port.lock, flags);
1281
1282         clk_disable_unprepare(sport->clk_per);
1283         clk_disable_unprepare(sport->clk_ipg);
1284 }
1285
1286 static void imx_flush_buffer(struct uart_port *port)
1287 {
1288         struct imx_port *sport = (struct imx_port *)port;
1289         struct scatterlist *sgl = &sport->tx_sgl[0];
1290         unsigned long temp;
1291         int i = 100, ubir, ubmr, uts;
1292
1293         if (!sport->dma_chan_tx)
1294                 return;
1295
1296         sport->tx_bytes = 0;
1297         dmaengine_terminate_all(sport->dma_chan_tx);
1298         if (sport->dma_is_txing) {
1299                 dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents,
1300                              DMA_TO_DEVICE);
1301                 temp = readl(sport->port.membase + UCR1);
1302                 temp &= ~UCR1_TDMAEN;
1303                 writel(temp, sport->port.membase + UCR1);
1304                 sport->dma_is_txing = false;
1305         }
1306
1307         /*
1308          * According to the Reference Manual description of the UART SRST bit:
1309          * "Reset the transmit and receive state machines,
1310          * all FIFOs and register USR1, USR2, UBIR, UBMR, UBRC, URXD, UTXD
1311          * and UTS[6-3]". As we don't need to restore the old values from
1312          * USR1, USR2, URXD, UTXD, only save/restore the other four registers
1313          */
1314         ubir = readl(sport->port.membase + UBIR);
1315         ubmr = readl(sport->port.membase + UBMR);
1316         uts = readl(sport->port.membase + IMX21_UTS);
1317
1318         temp = readl(sport->port.membase + UCR2);
1319         temp &= ~UCR2_SRST;
1320         writel(temp, sport->port.membase + UCR2);
1321
1322         while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0))
1323                 udelay(1);
1324
1325         /* Restore the registers */
1326         writel(ubir, sport->port.membase + UBIR);
1327         writel(ubmr, sport->port.membase + UBMR);
1328         writel(uts, sport->port.membase + IMX21_UTS);
1329 }
1330
1331 static void
1332 imx_set_termios(struct uart_port *port, struct ktermios *termios,
1333                    struct ktermios *old)
1334 {
1335         struct imx_port *sport = (struct imx_port *)port;
1336         unsigned long flags;
1337         unsigned long ucr2, old_ucr1, old_ucr2;
1338         unsigned int baud, quot;
1339         unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
1340         unsigned long div, ufcr;
1341         unsigned long num, denom;
1342         uint64_t tdiv64;
1343
1344         /*
1345          * We only support CS7 and CS8.
1346          */
1347         while ((termios->c_cflag & CSIZE) != CS7 &&
1348                (termios->c_cflag & CSIZE) != CS8) {
1349                 termios->c_cflag &= ~CSIZE;
1350                 termios->c_cflag |= old_csize;
1351                 old_csize = CS8;
1352         }
1353
1354         if ((termios->c_cflag & CSIZE) == CS8)
1355                 ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS;
1356         else
1357                 ucr2 = UCR2_SRST | UCR2_IRTS;
1358
1359         if (termios->c_cflag & CRTSCTS) {
1360                 if (sport->have_rtscts) {
1361                         ucr2 &= ~UCR2_IRTS;
1362
1363                         if (port->rs485.flags & SER_RS485_ENABLED) {
1364                                 /*
1365                                  * RTS is mandatory for rs485 operation, so keep
1366                                  * it under manual control and keep transmitter
1367                                  * disabled.
1368                                  */
1369                                 if (port->rs485.flags &
1370                                     SER_RS485_RTS_AFTER_SEND)
1371                                         imx_port_rts_inactive(sport, &ucr2);
1372                                 else
1373                                         imx_port_rts_active(sport, &ucr2);
1374                         } else {
1375                                 imx_port_rts_auto(sport, &ucr2);
1376                         }
1377                 } else {
1378                         termios->c_cflag &= ~CRTSCTS;
1379                 }
1380         } else if (port->rs485.flags & SER_RS485_ENABLED) {
1381                 /* disable transmitter */
1382                 if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
1383                         imx_port_rts_inactive(sport, &ucr2);
1384                 else
1385                         imx_port_rts_active(sport, &ucr2);
1386         }
1387
1388
1389         if (termios->c_cflag & CSTOPB)
1390                 ucr2 |= UCR2_STPB;
1391         if (termios->c_cflag & PARENB) {
1392                 ucr2 |= UCR2_PREN;
1393                 if (termios->c_cflag & PARODD)
1394                         ucr2 |= UCR2_PROE;
1395         }
1396
1397         del_timer_sync(&sport->timer);
1398
1399         /*
1400          * Ask the core to calculate the divisor for us.
1401          */
1402         baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
1403         quot = uart_get_divisor(port, baud);
1404
1405         spin_lock_irqsave(&sport->port.lock, flags);
1406
1407         sport->port.read_status_mask = 0;
1408         if (termios->c_iflag & INPCK)
1409                 sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);
1410         if (termios->c_iflag & (BRKINT | PARMRK))
1411                 sport->port.read_status_mask |= URXD_BRK;
1412
1413         /*
1414          * Characters to ignore
1415          */
1416         sport->port.ignore_status_mask = 0;
1417         if (termios->c_iflag & IGNPAR)
1418                 sport->port.ignore_status_mask |= URXD_PRERR | URXD_FRMERR;
1419         if (termios->c_iflag & IGNBRK) {
1420                 sport->port.ignore_status_mask |= URXD_BRK;
1421                 /*
1422                  * If we're ignoring parity and break indicators,
1423                  * ignore overruns too (for real raw support).
1424                  */
1425                 if (termios->c_iflag & IGNPAR)
1426                         sport->port.ignore_status_mask |= URXD_OVRRUN;
1427         }
1428
1429         if ((termios->c_cflag & CREAD) == 0)
1430                 sport->port.ignore_status_mask |= URXD_DUMMY_READ;
1431
1432         /*
1433          * Update the per-port timeout.
1434          */
1435         uart_update_timeout(port, termios->c_cflag, baud);
1436
1437         /*
1438          * disable interrupts and drain transmitter
1439          */
1440         old_ucr1 = readl(sport->port.membase + UCR1);
1441         writel(old_ucr1 & ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN),
1442                         sport->port.membase + UCR1);
1443
1444         while (!(readl(sport->port.membase + USR2) & USR2_TXDC))
1445                 barrier();
1446
1447         /* then, disable everything */
1448         old_ucr2 = readl(sport->port.membase + UCR2);
1449         writel(old_ucr2 & ~(UCR2_TXEN | UCR2_RXEN),
1450                         sport->port.membase + UCR2);
1451         old_ucr2 &= (UCR2_TXEN | UCR2_RXEN | UCR2_ATEN);
1452
1453         /* custom-baudrate handling */
1454         div = sport->port.uartclk / (baud * 16);
1455         if (baud == 38400 && quot != div)
1456                 baud = sport->port.uartclk / (quot * 16);
1457
1458         div = sport->port.uartclk / (baud * 16);
1459         if (div > 7)
1460                 div = 7;
1461         if (!div)
1462                 div = 1;
1463
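        /*
         * The resulting rate is uartclk * (UBIR + 1) / (16 * div * (UBMR + 1)),
         * so approximate 16 * div * baud / uartclk as num / denom and program
         * num - 1 and denom - 1 into UBIR and UBMR below.
         */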
1464         rational_best_approximation(16 * div * baud, sport->port.uartclk,
1465                 1 << 16, 1 << 16, &num, &denom);
1466
1467         tdiv64 = sport->port.uartclk;
1468         tdiv64 *= num;
1469         do_div(tdiv64, denom * 16 * div);
1470         tty_termios_encode_baud_rate(termios,
1471                                 (speed_t)tdiv64, (speed_t)tdiv64);
1472
1473         num -= 1;
1474         denom -= 1;
1475
1476         ufcr = readl(sport->port.membase + UFCR);
1477         ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
1478         if (sport->dte_mode)
1479                 ufcr |= UFCR_DCEDTE;
1480         writel(ufcr, sport->port.membase + UFCR);
1481
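             /*
              * The reference manual requires UBIR to be written before UBMR
              * for the new ratio to be latched correctly.
              */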
1482         writel(num, sport->port.membase + UBIR);
1483         writel(denom, sport->port.membase + UBMR);
1484
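             /*
              * IMX21_ONEMS holds the number of reference-clock cycles in one
              * millisecond; the UART uses it as the time base for the escape
              * character timer (UTIM counts in milliseconds).
              */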
1485         if (!is_imx1_uart(sport))
1486                 writel(sport->port.uartclk / div / 1000,
1487                                 sport->port.membase + IMX21_ONEMS);
1488
1489         writel(old_ucr1, sport->port.membase + UCR1);
1490
1491         /* set the parity, stop bits and data size */
1492         writel(ucr2 | old_ucr2, sport->port.membase + UCR2);
1493
1494         if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
1495                 imx_enable_ms(&sport->port);
1496
1497         spin_unlock_irqrestore(&sport->port.lock, flags);
1498 }
1499
1500 static const char *imx_type(struct uart_port *port)
1501 {
1502         struct imx_port *sport = (struct imx_port *)port;
1503
1504         return sport->port.type == PORT_IMX ? "IMX" : NULL;
1505 }
1506
1507 /*
1508  * Configure/autoconfigure the port.
1509  */
1510 static void imx_config_port(struct uart_port *port, int flags)
1511 {
1512         struct imx_port *sport = (struct imx_port *)port;
1513
1514         if (flags & UART_CONFIG_TYPE)
1515                 sport->port.type = PORT_IMX;
1516 }
1517
1518 /*
1519  * Verify the new serial_struct (for TIOCSSERIAL).
1520  * The only changes we allow are to the flags and type, and even then
1521  * the type may only change between PORT_IMX and PORT_UNKNOWN.
1522  */
1523 static int
1524 imx_verify_port(struct uart_port *port, struct serial_struct *ser)
1525 {
1526         struct imx_port *sport = (struct imx_port *)port;
1527         int ret = 0;
1528
1529         if (ser->type != PORT_UNKNOWN && ser->type != PORT_IMX)
1530                 ret = -EINVAL;
1531         if (sport->port.irq != ser->irq)
1532                 ret = -EINVAL;
1533         if (ser->io_type != UPIO_MEM)
1534                 ret = -EINVAL;
1535         if (sport->port.uartclk / 16 != ser->baud_base)
1536                 ret = -EINVAL;
1537         if (sport->port.mapbase != (unsigned long)ser->iomem_base)
1538                 ret = -EINVAL;
1539         if (sport->port.iobase != ser->port)
1540                 ret = -EINVAL;
1541         if (ser->hub6 != 0)
1542                 ret = -EINVAL;
1543         return ret;
1544 }
1545
1546 #if defined(CONFIG_CONSOLE_POLL)
1547
1548 static int imx_poll_init(struct uart_port *port)
1549 {
1550         struct imx_port *sport = (struct imx_port *)port;
1551         unsigned long flags;
1552         unsigned long temp;
1553         int retval;
1554
1555         retval = clk_prepare_enable(sport->clk_ipg);
1556         if (retval)
1557                 return retval;
1558         retval = clk_prepare_enable(sport->clk_per);
1559         if (retval) {
1560                 clk_disable_unprepare(sport->clk_ipg);
                     return retval;
             }
1561
1562         imx_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
1563
1564         spin_lock_irqsave(&sport->port.lock, flags);
1565
1566         temp = readl(sport->port.membase + UCR1);
1567         if (is_imx1_uart(sport))
1568                 temp |= IMX1_UCR1_UARTCLKEN;
1569         temp |= UCR1_UARTEN | UCR1_RRDYEN;
1570         temp &= ~(UCR1_TXMPTYEN | UCR1_RTSDEN);
1571         writel(temp, sport->port.membase + UCR1);
1572
1573         temp = readl(sport->port.membase + UCR2);
1574         temp |= UCR2_RXEN;
1575         writel(temp, sport->port.membase + UCR2);
1576
1577         spin_unlock_irqrestore(&sport->port.lock, flags);
1578
1579         return 0;
1580 }
1581
1582 static int imx_poll_get_char(struct uart_port *port)
1583 {
1584         if (!(readl_relaxed(port->membase + USR2) & USR2_RDR))
1585                 return NO_POLL_CHAR;
1586
1587         return readl_relaxed(port->membase + URXD0) & URXD_RX_DATA;
1588 }
1589
1590 static void imx_poll_put_char(struct uart_port *port, unsigned char c)
1591 {
1592         unsigned int status;
1593
1594         /* drain */
1595         do {
1596                 status = readl_relaxed(port->membase + USR1);
1597         } while (~status & USR1_TRDY);
1598
1599         /* write */
1600         writel_relaxed(c, port->membase + URTX0);
1601
1602         /* flush */
1603         do {
1604                 status = readl_relaxed(port->membase + USR2);
1605         } while (~status & USR2_TXDC);
1606 }
1607 #endif
1608
1609 static int imx_rs485_config(struct uart_port *port,
1610                             struct serial_rs485 *rs485conf)
1611 {
1612         struct imx_port *sport = (struct imx_port *)port;
1613
1614         /* RTS delays and disabling RX during TX are not implemented */
1615         rs485conf->delay_rts_before_send = 0;
1616         rs485conf->delay_rts_after_send = 0;
1617         rs485conf->flags |= SER_RS485_RX_DURING_TX;
1618
1619         /* RTS is required to control the transmitter */
1620         if (!sport->have_rtscts)
1621                 rs485conf->flags &= ~SER_RS485_ENABLED;
1622
1623         if (rs485conf->flags & SER_RS485_ENABLED) {
1624                 unsigned long temp;
1625
1626                 /* disable transmitter */
1627                 temp = readl(sport->port.membase + UCR2);
1628                 if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
1629                         imx_port_rts_inactive(sport, &temp);
1630                 else
1631                         imx_port_rts_active(sport, &temp);
1632                 writel(temp, sport->port.membase + UCR2);
1633         }
1634
1635         port->rs485 = *rs485conf;
1636
1637         return 0;
1638 }
1639
1640 static struct uart_ops imx_pops = {
1641         .tx_empty       = imx_tx_empty,
1642         .set_mctrl      = imx_set_mctrl,
1643         .get_mctrl      = imx_get_mctrl,
1644         .stop_tx        = imx_stop_tx,
1645         .start_tx       = imx_start_tx,
1646         .stop_rx        = imx_stop_rx,
1647         .enable_ms      = imx_enable_ms,
1648         .break_ctl      = imx_break_ctl,
1649         .startup        = imx_startup,
1650         .shutdown       = imx_shutdown,
1651         .flush_buffer   = imx_flush_buffer,
1652         .set_termios    = imx_set_termios,
1653         .type           = imx_type,
1654         .config_port    = imx_config_port,
1655         .verify_port    = imx_verify_port,
1656 #if defined(CONFIG_CONSOLE_POLL)
1657         .poll_init      = imx_poll_init,
1658         .poll_get_char  = imx_poll_get_char,
1659         .poll_put_char  = imx_poll_put_char,
1660 #endif
1661 };
1662
1663 static struct imx_port *imx_ports[UART_NR];
1664
1665 #ifdef CONFIG_SERIAL_IMX_CONSOLE
1666 static void imx_console_putchar(struct uart_port *port, int ch)
1667 {
1668         struct imx_port *sport = (struct imx_port *)port;
1669
1670         while (readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)
1671                 barrier();
1672
1673         writel(ch, sport->port.membase + URTX0);
1674 }
1675
1676 /*
1677  * Interrupts are disabled on entering
1678  */
1679 static void
1680 imx_console_write(struct console *co, const char *s, unsigned int count)
1681 {
1682         struct imx_port *sport = imx_ports[co->index];
1683         struct imx_port_ucrs old_ucr;
1684         unsigned int ucr1;
1685         unsigned long flags = 0;
1686         int locked = 1;
1687         int retval;
1688
1689         retval = clk_enable(sport->clk_per);
1690         if (retval)
1691                 return;
1692         retval = clk_enable(sport->clk_ipg);
1693         if (retval) {
1694                 clk_disable(sport->clk_per);
1695                 return;
1696         }
1697
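             /*
              * Locking: while a sysrq character is being handled the port
              * lock is already held by our interrupt handler, so it must not
              * be taken again; during an oops only try to lock, so console
              * output still gets out even if the lock is stuck.
              */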
1698         if (sport->port.sysrq)
1699                 locked = 0;
1700         else if (oops_in_progress)
1701                 locked = spin_trylock_irqsave(&sport->port.lock, flags);
1702         else
1703                 spin_lock_irqsave(&sport->port.lock, flags);
1704
1705         /*
1706          *      First, save UCR1/2/3 and then disable interrupts
1707          */
1708         imx_port_ucrs_save(&sport->port, &old_ucr);
1709         ucr1 = old_ucr.ucr1;
1710
1711         if (is_imx1_uart(sport))
1712                 ucr1 |= IMX1_UCR1_UARTCLKEN;
1713         ucr1 |= UCR1_UARTEN;
1714         ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN);
1715
1716         writel(ucr1, sport->port.membase + UCR1);
1717
1718         writel(old_ucr.ucr2 | UCR2_TXEN, sport->port.membase + UCR2);
1719
1720         uart_console_write(&sport->port, s, count, imx_console_putchar);
1721
1722         /*
1723          *      Finally, wait for transmitter to become empty
1724          *      and restore UCR1/2/3
1725          */
1726         while (!(readl(sport->port.membase + USR2) & USR2_TXDC))
                     barrier();
1727
1728         imx_port_ucrs_restore(&sport->port, &old_ucr);
1729
1730         if (locked)
1731                 spin_unlock_irqrestore(&sport->port.lock, flags);
1732
1733         clk_disable(sport->clk_ipg);
1734         clk_disable(sport->clk_per);
1735 }
1736
1737 /*
1738  * If the port was already initialised (e.g. by a boot loader),
1739  * try to determine the current setup.
1740  */
1741 static void __init
1742 imx_console_get_options(struct imx_port *sport, int *baud,
1743                            int *parity, int *bits)
1744 {
1745
1746         if (readl(sport->port.membase + UCR1) & UCR1_UARTEN) {
1747                 /* ok, the port was enabled */
1748                 unsigned int ucr2, ubir, ubmr, uartclk;
1749                 unsigned int baud_raw;
1750                 unsigned int ucfr_rfdiv;
1751
1752                 ucr2 = readl(sport->port.membase + UCR2);
1753
1754                 *parity = 'n';
1755                 if (ucr2 & UCR2_PREN) {
1756                         if (ucr2 & UCR2_PROE)
1757                                 *parity = 'o';
1758                         else
1759                                 *parity = 'e';
1760                 }
1761
1762                 if (ucr2 & UCR2_WS)
1763                         *bits = 8;
1764                 else
1765                         *bits = 7;
1766
1767                 ubir = readl(sport->port.membase + UBIR) & 0xffff;
1768                 ubmr = readl(sport->port.membase + UBMR) & 0xffff;
1769
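             /*
              * UFCR[9:7] (RFDIV) encodes the reference clock divider as
              * 6 - value, except that value 6 selects divide-by-7; the code
              * below converts the register value back into the divider.
              */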
1770                 ucfr_rfdiv = (readl(sport->port.membase + UFCR) & UFCR_RFDIV) >> 7;
1771                 if (ucfr_rfdiv == 6)
1772                         ucfr_rfdiv = 7;
1773                 else
1774                         ucfr_rfdiv = 6 - ucfr_rfdiv;
1775
1776                 uartclk = clk_get_rate(sport->clk_per);
1777                 uartclk /= ucfr_rfdiv;
1778
1779                 {       /*
1780                          * The next code provides exact computation of
1781                          *   baud_raw = round(((uartclk/16) * (ubir + 1)) / (ubmr + 1))
1782                          * without need of float support or long long division,
1783                          * which would be required to prevent 32bit arithmetic overflow
1784                          */
1785                         unsigned int mul = ubir + 1;
1786                         unsigned int div = 16 * (ubmr + 1);
1787                         unsigned int rem = uartclk % div;
1788
1789                         baud_raw = (uartclk / div) * mul;
1790                         baud_raw += (rem * mul + div / 2) / div;
1791                         *baud = (baud_raw + 50) / 100 * 100;
1792                 }
1793
1794                 if (*baud != baud_raw)
1795                         pr_info("Console IMX rounded baud rate from %d to %d\n",
1796                                 baud_raw, *baud);
1797         }
1798 }
1799
1800 static int __init
1801 imx_console_setup(struct console *co, char *options)
1802 {
1803         struct imx_port *sport;
1804         int baud = 9600;
1805         int bits = 8;
1806         int parity = 'n';
1807         int flow = 'n';
1808         int retval;
1809
1810         /*
1811          * Check whether an invalid uart number has been specified, and
1812          * if so, fall back to the first port.
1814          */
1815         if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports))
1816                 co->index = 0;
1817         sport = imx_ports[co->index];
1818         if (sport == NULL)
1819                 return -ENODEV;
1820
1821         /* For setting the registers, we only need to enable the ipg clock. */
1822         retval = clk_prepare_enable(sport->clk_ipg);
1823         if (retval)
1824                 goto error_console;
1825
1826         if (options)
1827                 uart_parse_options(options, &baud, &parity, &bits, &flow);
1828         else
1829                 imx_console_get_options(sport, &baud, &parity, &bits);
1830
1831         imx_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
1832
1833         retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
1834
1835         clk_disable(sport->clk_ipg);
1836         if (retval) {
1837                 clk_unprepare(sport->clk_ipg);
1838                 goto error_console;
1839         }
1840
1841         retval = clk_prepare(sport->clk_per);
1842         if (retval)
1843                 clk_disable_unprepare(sport->clk_ipg);
1844
1845 error_console:
1846         return retval;
1847 }
1848
1849 static struct uart_driver imx_reg;
1850 static struct console imx_console = {
1851         .name           = DEV_NAME,
1852         .write          = imx_console_write,
1853         .device         = uart_console_device,
1854         .setup          = imx_console_setup,
1855         .flags          = CON_PRINTBUFFER,
1856         .index          = -1,
1857         .data           = &imx_reg,
1858 };
1859
1860 #define IMX_CONSOLE     &imx_console
1861
1862 #ifdef CONFIG_OF
1863 static void imx_console_early_putchar(struct uart_port *port, int ch)
1864 {
1865         while (readl_relaxed(port->membase + IMX21_UTS) & UTS_TXFULL)
1866                 cpu_relax();
1867
1868         writel_relaxed(ch, port->membase + URTX0);
1869 }
1870
1871 static void imx_console_early_write(struct console *con, const char *s,
1872                                     unsigned count)
1873 {
1874         struct earlycon_device *dev = con->data;
1875
1876         uart_console_write(&dev->port, s, count, imx_console_early_putchar);
1877 }
1878
1879 static int __init
1880 imx_console_early_setup(struct earlycon_device *dev, const char *opt)
1881 {
1882         if (!dev->port.membase)
1883                 return -ENODEV;
1884
1885         dev->con->write = imx_console_early_write;
1886
1887         return 0;
1888 }
1889 OF_EARLYCON_DECLARE(ec_imx6q, "fsl,imx6q-uart", imx_console_early_setup);
1890 OF_EARLYCON_DECLARE(ec_imx21, "fsl,imx21-uart", imx_console_early_setup);
1891 #endif
1892
1893 #else
1894 #define IMX_CONSOLE     NULL
1895 #endif
1896
1897 static struct uart_driver imx_reg = {
1898         .owner          = THIS_MODULE,
1899         .driver_name    = DRIVER_NAME,
1900         .dev_name       = DEV_NAME,
1901         .major          = SERIAL_IMX_MAJOR,
1902         .minor          = MINOR_START,
1903         .nr             = ARRAY_SIZE(imx_ports),
1904         .cons           = IMX_CONSOLE,
1905 };
1906
1907 #ifdef CONFIG_OF
1908 /*
1909  * This function returns 1 if pdev isn't a device instantiated by dt, 0 if it
1910  * could successfully get all information from dt, or a negative errno.
1911  */
1912 static int serial_imx_probe_dt(struct imx_port *sport,
1913                 struct platform_device *pdev)
1914 {
1915         struct device_node *np = pdev->dev.of_node;
1916         int ret;
1917
1918         sport->devdata = of_device_get_match_data(&pdev->dev);
1919         if (!sport->devdata)
1920                 /* no device tree device */
1921                 return 1;
1922
1923         ret = of_alias_get_id(np, "serial");
1924         if (ret < 0) {
1925                 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
1926                 return ret;
1927         }
1928         sport->port.line = ret;
1929
1930         if (of_get_property(np, "fsl,uart-has-rtscts", NULL))
1931                 sport->have_rtscts = 1;
1932
1933         if (of_get_property(np, "fsl,dte-mode", NULL))
1934                 sport->dte_mode = 1;
1935
1936         return 0;
1937 }
1938 #else
1939 static inline int serial_imx_probe_dt(struct imx_port *sport,
1940                 struct platform_device *pdev)
1941 {
1942         return 1;
1943 }
1944 #endif
1945
1946 static void serial_imx_probe_pdata(struct imx_port *sport,
1947                 struct platform_device *pdev)
1948 {
1949         struct imxuart_platform_data *pdata = dev_get_platdata(&pdev->dev);
1950
1951         sport->port.line = pdev->id;
1952         sport->devdata = (struct imx_uart_data  *) pdev->id_entry->driver_data;
1953
1954         if (!pdata)
1955                 return;
1956
1957         if (pdata->flags & IMXUART_HAVE_RTSCTS)
1958                 sport->have_rtscts = 1;
1959 }
1960
1961 static int serial_imx_probe(struct platform_device *pdev)
1962 {
1963         struct imx_port *sport;
1964         void __iomem *base;
1965         int ret = 0, reg;
1966         struct resource *res;
1967         int txirq, rxirq, rtsirq;
1968
1969         sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
1970         if (!sport)
1971                 return -ENOMEM;
1972
1973         ret = serial_imx_probe_dt(sport, pdev);
1974         if (ret > 0)
1975                 serial_imx_probe_pdata(sport, pdev);
1976         else if (ret < 0)
1977                 return ret;
1978
1979         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1980         base = devm_ioremap_resource(&pdev->dev, res);
1981         if (IS_ERR(base))
1982                 return PTR_ERR(base);
1983
1984         rxirq = platform_get_irq(pdev, 0);
1985         txirq = platform_get_irq(pdev, 1);
1986         rtsirq = platform_get_irq(pdev, 2);
1987
1988         sport->port.dev = &pdev->dev;
1989         sport->port.mapbase = res->start;
1990         sport->port.membase = base;
1991         sport->port.type = PORT_IMX;
1992         sport->port.iotype = UPIO_MEM;
1993         sport->port.irq = rxirq;
1994         sport->port.fifosize = 32;
1995         sport->port.ops = &imx_pops;
1996         sport->port.rs485_config = imx_rs485_config;
1997         sport->port.rs485.flags =
1998                 SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX;
1999         sport->port.flags = UPF_BOOT_AUTOCONF;
2000         init_timer(&sport->timer);
2001         sport->timer.function = imx_timeout;
2002         sport->timer.data     = (unsigned long)sport;
2003
2004         sport->gpios = mctrl_gpio_init(&sport->port, 0);
2005         if (IS_ERR(sport->gpios))
2006                 return PTR_ERR(sport->gpios);
2007
2008         sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
2009         if (IS_ERR(sport->clk_ipg)) {
2010                 ret = PTR_ERR(sport->clk_ipg);
2011                 dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
2012                 return ret;
2013         }
2014
2015         sport->clk_per = devm_clk_get(&pdev->dev, "per");
2016         if (IS_ERR(sport->clk_per)) {
2017                 ret = PTR_ERR(sport->clk_per);
2018                 dev_err(&pdev->dev, "failed to get per clk: %d\n", ret);
2019                 return ret;
2020         }
2021
2022         sport->port.uartclk = clk_get_rate(sport->clk_per);
2023
2024         /* For register access, we only need to enable the ipg clock. */
2025         ret = clk_prepare_enable(sport->clk_ipg);
2026         if (ret)
2027                 return ret;
2028
2029         /* Disable interrupts before requesting them */
2030         reg = readl_relaxed(sport->port.membase + UCR1);
2031         reg &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN |
2032                  UCR1_TXMPTYEN | UCR1_RTSDEN);
2033         writel_relaxed(reg, sport->port.membase + UCR1);
2034
2035         clk_disable_unprepare(sport->clk_ipg);
2036
2037         /*
2038          * Allocate the IRQ(s).  i.MX1 has three interrupts, whereas later
2039          * chips only have one.
2040          */
2041         if (txirq > 0) {
2042                 ret = devm_request_irq(&pdev->dev, rxirq, imx_rxint, 0,
2043                                        dev_name(&pdev->dev), sport);
2044                 if (ret)
2045                         return ret;
2046
2047                 ret = devm_request_irq(&pdev->dev, txirq, imx_txint, 0,
2048                                        dev_name(&pdev->dev), sport);
2049                 if (ret)
2050                         return ret;
2051         } else {
2052                 ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0,
2053                                        dev_name(&pdev->dev), sport);
2054                 if (ret)
2055                         return ret;
2056         }
2057
2058         imx_ports[sport->port.line] = sport;
2059
2060         platform_set_drvdata(pdev, sport);
2061
2062         return uart_add_one_port(&imx_reg, &sport->port);
2063 }
2064
2065 static int serial_imx_remove(struct platform_device *pdev)
2066 {
2067         struct imx_port *sport = platform_get_drvdata(pdev);
2068
2069         return uart_remove_one_port(&imx_reg, &sport->port);
2070 }
2071
2072 static void serial_imx_restore_context(struct imx_port *sport)
2073 {
2074         if (!sport->context_saved)
2075                 return;
2076
2077         writel(sport->saved_reg[4], sport->port.membase + UFCR);
2078         writel(sport->saved_reg[5], sport->port.membase + UESC);
2079         writel(sport->saved_reg[6], sport->port.membase + UTIM);
2080         writel(sport->saved_reg[7], sport->port.membase + UBIR);
2081         writel(sport->saved_reg[8], sport->port.membase + UBMR);
2082         writel(sport->saved_reg[9], sport->port.membase + IMX21_UTS);
2083         writel(sport->saved_reg[0], sport->port.membase + UCR1);
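             /*
              * Keep UCR2_SRST set while restoring UCR2: clearing it would
              * trigger a software reset of the UART state machines.
              */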
2084         writel(sport->saved_reg[1] | UCR2_SRST, sport->port.membase + UCR2);
2085         writel(sport->saved_reg[2], sport->port.membase + UCR3);
2086         writel(sport->saved_reg[3], sport->port.membase + UCR4);
2087         sport->context_saved = false;
2088 }
2089
2090 static void serial_imx_save_context(struct imx_port *sport)
2091 {
2092         /* Save necessary regs */
2093         sport->saved_reg[0] = readl(sport->port.membase + UCR1);
2094         sport->saved_reg[1] = readl(sport->port.membase + UCR2);
2095         sport->saved_reg[2] = readl(sport->port.membase + UCR3);
2096         sport->saved_reg[3] = readl(sport->port.membase + UCR4);
2097         sport->saved_reg[4] = readl(sport->port.membase + UFCR);
2098         sport->saved_reg[5] = readl(sport->port.membase + UESC);
2099         sport->saved_reg[6] = readl(sport->port.membase + UTIM);
2100         sport->saved_reg[7] = readl(sport->port.membase + UBIR);
2101         sport->saved_reg[8] = readl(sport->port.membase + UBMR);
2102         sport->saved_reg[9] = readl(sport->port.membase + IMX21_UTS);
2103         sport->context_saved = true;
2104 }
2105
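     /*
      * Arm or disarm the UART as a wakeup source: UCR3_AWAKEN wakes on
      * asynchronous RX line activity, UCR1_RTSDEN on RTS edges.
      */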
2106 static void serial_imx_enable_wakeup(struct imx_port *sport, bool on)
2107 {
2108         unsigned int val;
2109
2110         val = readl(sport->port.membase + UCR3);
2111         if (on)
2112                 val |= UCR3_AWAKEN;
2113         else
2114                 val &= ~UCR3_AWAKEN;
2115         writel(val, sport->port.membase + UCR3);
2116
2117         val = readl(sport->port.membase + UCR1);
2118         if (on)
2119                 val |= UCR1_RTSDEN;
2120         else
2121                 val &= ~UCR1_RTSDEN;
2122         writel(val, sport->port.membase + UCR1);
2123 }
2124
2125 static int imx_serial_port_suspend_noirq(struct device *dev)
2126 {
2127         struct platform_device *pdev = to_platform_device(dev);
2128         struct imx_port *sport = platform_get_drvdata(pdev);
2129         int ret;
2130
2131         ret = clk_enable(sport->clk_ipg);
2132         if (ret)
2133                 return ret;
2134
2135         serial_imx_save_context(sport);
2136
2137         clk_disable(sport->clk_ipg);
2138
2139         return 0;
2140 }
2141
2142 static int imx_serial_port_resume_noirq(struct device *dev)
2143 {
2144         struct platform_device *pdev = to_platform_device(dev);
2145         struct imx_port *sport = platform_get_drvdata(pdev);
2146         int ret;
2147
2148         ret = clk_enable(sport->clk_ipg);
2149         if (ret)
2150                 return ret;
2151
2152         serial_imx_restore_context(sport);
2153
2154         clk_disable(sport->clk_ipg);
2155
2156         return 0;
2157 }
2158
2159 static int imx_serial_port_suspend(struct device *dev)
2160 {
2161         struct platform_device *pdev = to_platform_device(dev);
2162         struct imx_port *sport = platform_get_drvdata(pdev);
2163
2164         /* enable wakeup from i.MX UART */
2165         serial_imx_enable_wakeup(sport, true);
2166
2167         uart_suspend_port(&imx_reg, &sport->port);
2168
2169         /*
              * Prepare the ipg clock here: clk_prepare() may sleep, so it
              * cannot be called from suspend_noirq(), which is limited to the
              * atomic clk_enable()/clk_disable() calls.
              */
2170         return clk_prepare(sport->clk_ipg);
2171 }
2172
2173 static int imx_serial_port_resume(struct device *dev)
2174 {
2175         struct platform_device *pdev = to_platform_device(dev);
2176         struct imx_port *sport = platform_get_drvdata(pdev);
2177
2178         /* disable wakeup from i.MX UART */
2179         serial_imx_enable_wakeup(sport, false);
2180
2181         uart_resume_port(&imx_reg, &sport->port);
2182
2183         clk_unprepare(sport->clk_ipg);
2184
2185         return 0;
2186 }
2187
2188 static const struct dev_pm_ops imx_serial_port_pm_ops = {
2189         .suspend_noirq = imx_serial_port_suspend_noirq,
2190         .resume_noirq = imx_serial_port_resume_noirq,
2191         .suspend = imx_serial_port_suspend,
2192         .resume = imx_serial_port_resume,
2193 };
2194
2195 static struct platform_driver serial_imx_driver = {
2196         .probe          = serial_imx_probe,
2197         .remove         = serial_imx_remove,
2198
2199         .id_table       = imx_uart_devtype,
2200         .driver         = {
2201                 .name   = "imx-uart",
2202                 .of_match_table = imx_uart_dt_ids,
2203                 .pm     = &imx_serial_port_pm_ops,
2204         },
2205 };
2206
2207 static int __init imx_serial_init(void)
2208 {
2209         int ret = uart_register_driver(&imx_reg);
2210
2211         if (ret)
2212                 return ret;
2213
2214         ret = platform_driver_register(&serial_imx_driver);
2215         if (ret != 0)
2216                 uart_unregister_driver(&imx_reg);
2217
2218         return ret;
2219 }
2220
2221 static void __exit imx_serial_exit(void)
2222 {
2223         platform_driver_unregister(&serial_imx_driver);
2224         uart_unregister_driver(&imx_reg);
2225 }
2226
2227 module_init(imx_serial_init);
2228 module_exit(imx_serial_exit);
2229
2230 MODULE_AUTHOR("Sascha Hauer");
2231 MODULE_DESCRIPTION("IMX generic serial port driver");
2232 MODULE_LICENSE("GPL");
2233 MODULE_ALIAS("platform:imx-uart");