1 /*
2  * Driver for CSR SiRFprimaII onboard UARTs.
3  *
4  * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
5  *
6  * Licensed under GPLv2 or later.
7  */
8
9 #include <linux/module.h>
10 #include <linux/ioport.h>
11 #include <linux/platform_device.h>
12 #include <linux/init.h>
13 #include <linux/sysrq.h>
14 #include <linux/console.h>
15 #include <linux/tty.h>
16 #include <linux/tty_flip.h>
17 #include <linux/serial_core.h>
18 #include <linux/serial.h>
19 #include <linux/clk.h>
20 #include <linux/of.h>
21 #include <linux/slab.h>
22 #include <linux/io.h>
23 #include <linux/of_gpio.h>
24 #include <linux/dmaengine.h>
25 #include <linux/dma-direction.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/sirfsoc_dma.h>
28 #include <asm/irq.h>
29 #include <asm/mach/irq.h>
30
31 #include "sirfsoc_uart.h"
32
33 static unsigned int
34 sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count);
35 static unsigned int
36 sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
37 static struct uart_driver sirfsoc_uart_drv;
38
39 static void sirfsoc_uart_tx_dma_complete_callback(void *param);
40 static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port);
41 static void sirfsoc_uart_rx_dma_complete_callback(void *param);
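/*
 * Precomputed divisor register values for common baud rates, used as a
 * fast path in set_termios when the UART I/O clock runs at 150 MHz
 * (see the ioclk_rate == 150000000 check there).
 */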
42 static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
43         {4000000, 2359296},
44         {3500000, 1310721},
45         {3000000, 1572865},
46         {2500000, 1245186},
47         {2000000, 1572866},
48         {1500000, 1245188},
49         {1152000, 1638404},
50         {1000000, 1572869},
51         {921600, 1114120},
52         {576000, 1245196},
53         {500000, 1245198},
54         {460800, 1572876},
55         {230400, 1310750},
56         {115200, 1310781},
57         {57600, 1310843},
58         {38400, 1114328},
59         {19200, 1114545},
60         {9600, 1114979},
61 };
62
63 static struct sirfsoc_uart_port sirfsoc_uart_ports[SIRFSOC_UART_NR] = {
64         [0] = {
65                 .port = {
66                         .iotype         = UPIO_MEM,
67                         .flags          = UPF_BOOT_AUTOCONF,
68                         .line           = 0,
69                 },
70         },
71         [1] = {
72                 .port = {
73                         .iotype         = UPIO_MEM,
74                         .flags          = UPF_BOOT_AUTOCONF,
75                         .line           = 1,
76                 },
77         },
78         [2] = {
79                 .port = {
80                         .iotype         = UPIO_MEM,
81                         .flags          = UPF_BOOT_AUTOCONF,
82                         .line           = 2,
83                 },
84         },
85         [3] = {
86                 .port = {
87                         .iotype         = UPIO_MEM,
88                         .flags          = UPF_BOOT_AUTOCONF,
89                         .line           = 3,
90                 },
91         },
92         [4] = {
93                 .port = {
94                         .iotype         = UPIO_MEM,
95                         .flags          = UPF_BOOT_AUTOCONF,
96                         .line           = 4,
97                 },
98         },
99         [5] = {
100                 .port = {
101                         .iotype         = UPIO_MEM,
102                         .flags          = UPF_BOOT_AUTOCONF,
103                         .line           = 5,
104                 },
105         },
106 };
107
108 static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
109 {
110         return container_of(port, struct sirfsoc_uart_port, port);
111 }
112
113 static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
114 {
115         unsigned long reg;
116         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
117         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
118         struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
119         reg = rd_regl(port, ureg->sirfsoc_tx_fifo_status);
120
121         return (reg & ufifo_st->ff_empty(port->line)) ? TIOCSER_TEMT : 0;
122 }
123
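/*
 * Report modem status: DCD and DSR are always reported as asserted; CTS is
 * read from the AFC status register on a real UART or from the CTS GPIO on
 * a USP-based port, and is reported asserted when hardware flow control or
 * modem status signalling is disabled.
 */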
124 static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
125 {
126         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
127         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
128         if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
129                 goto cts_asserted;
130         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
131                 if (!(rd_regl(port, ureg->sirfsoc_afc_ctrl) &
132                                                 SIRFUART_AFC_CTS_STATUS))
133                         goto cts_asserted;
134                 else
135                         goto cts_deasserted;
136         } else {
137                 if (!gpio_get_value(sirfport->cts_gpio))
138                         goto cts_asserted;
139                 else
140                         goto cts_deasserted;
141         }
142 cts_deasserted:
143         return TIOCM_CAR | TIOCM_DSR;
144 cts_asserted:
145         return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
146 }
147
148 static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
149 {
150         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
151         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
152         unsigned int assert = mctrl & TIOCM_RTS;
153         unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0;
154         unsigned int current_val;
155
156         if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
157                 return;
158         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
159                 current_val = rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0xFF;
160                 val |= current_val;
161                 wr_regl(port, ureg->sirfsoc_afc_ctrl, val);
162         } else {
163                 if (!val)
164                         gpio_set_value(sirfport->rts_gpio, 1);
165                 else
166                         gpio_set_value(sirfport->rts_gpio, 0);
167         }
168 }
169
170 static void sirfsoc_uart_stop_tx(struct uart_port *port)
171 {
172         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
173         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
174         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
175
176         if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
177                 if (sirfport->tx_dma_state == TX_DMA_RUNNING) {
178                         dmaengine_pause(sirfport->tx_dma_chan);
179                         sirfport->tx_dma_state = TX_DMA_PAUSE;
180                 } else {
181                         if (!sirfport->is_marco)
182                                 wr_regl(port, ureg->sirfsoc_int_en_reg,
183                                 rd_regl(port, ureg->sirfsoc_int_en_reg) &
184                                 ~uint_en->sirfsoc_txfifo_empty_en);
185                         else
186                                 wr_regl(port, SIRFUART_INT_EN_CLR,
187                                 uint_en->sirfsoc_txfifo_empty_en);
188                 }
189         } else {
190                 if (!sirfport->is_marco)
191                         wr_regl(port, ureg->sirfsoc_int_en_reg,
192                                 rd_regl(port, ureg->sirfsoc_int_en_reg) &
193                                 ~uint_en->sirfsoc_txfifo_empty_en);
194                 else
195                         wr_regl(port, SIRFUART_INT_EN_CLR,
196                                 uint_en->sirfsoc_txfifo_empty_en);
197         }
198 }
199
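/*
 * Start or resume a TX transfer over DMA.  A paused transfer is simply
 * resumed; otherwise the pending circular-buffer data is mapped and handed
 * to the DMA engine, with PIO used for any head/tail bytes that break the
 * 4-byte alignment the DMA engine requires (see the comment below).
 */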
200 static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
201 {
202         struct uart_port *port = &sirfport->port;
203         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
204         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
205         struct circ_buf *xmit = &port->state->xmit;
206         unsigned long tran_size;
207         unsigned long tran_start;
208         unsigned long pio_tx_size;
209
210         tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
211         tran_start = (unsigned long)(xmit->buf + xmit->tail);
212         if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
213                         !tran_size)
214                 return;
215         if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
216                 dmaengine_resume(sirfport->tx_dma_chan);
217                 return;
218         }
219         if (sirfport->tx_dma_state == TX_DMA_RUNNING)
220                 return;
221         if (!sirfport->is_marco)
222                 wr_regl(port, ureg->sirfsoc_int_en_reg,
223                                 rd_regl(port, ureg->sirfsoc_int_en_reg)&
224                                 ~(uint_en->sirfsoc_txfifo_empty_en));
225         else
226                 wr_regl(port, SIRFUART_INT_EN_CLR,
227                                 uint_en->sirfsoc_txfifo_empty_en);
228         /*
229          * DMA requires both the buffer address and the buffer length to
230          * be 4-byte aligned, so fall back to PIO in two cases:
231          * 1. if the address is not 4-byte aligned, send the first 1~3
232          * bytes by PIO and hand the 4-byte-aligned remainder over to DMA
233          * 2. if the length is not 4-byte aligned, send the aligned part
234          * by DMA first, then the remaining 1~3 bytes by PIO
235          */
236         if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
237                 wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
238                 wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
239                         rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)|
240                         SIRFUART_IO_MODE);
241                 if (BYTES_TO_ALIGN(tran_start)) {
242                         pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
243                                 BYTES_TO_ALIGN(tran_start));
244                         tran_size -= pio_tx_size;
245                 }
246                 if (tran_size < 4)
247                         sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
248                 if (!sirfport->is_marco)
249                         wr_regl(port, ureg->sirfsoc_int_en_reg,
250                                 rd_regl(port, ureg->sirfsoc_int_en_reg)|
251                                 uint_en->sirfsoc_txfifo_empty_en);
252                 else
253                         wr_regl(port, ureg->sirfsoc_int_en_reg,
254                                 uint_en->sirfsoc_txfifo_empty_en);
255                 wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
256         } else {
257                 /* switch the TX transfer mode to DMA */
258                 wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
259                 wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
260                         rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)&
261                         ~SIRFUART_IO_MODE);
262                 wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
263                 tran_size &= ~(0x3);
264
265                 sirfport->tx_dma_addr = dma_map_single(port->dev,
266                         xmit->buf + xmit->tail,
267                         tran_size, DMA_TO_DEVICE);
268                 sirfport->tx_dma_desc = dmaengine_prep_slave_single(
269                         sirfport->tx_dma_chan, sirfport->tx_dma_addr,
270                         tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
271                 if (!sirfport->tx_dma_desc) {
272                         dev_err(port->dev, "DMA prep slave single fail\n");
273                         return;
274                 }
275                 sirfport->tx_dma_desc->callback =
276                         sirfsoc_uart_tx_dma_complete_callback;
277                 sirfport->tx_dma_desc->callback_param = (void *)sirfport;
278                 sirfport->transfer_size = tran_size;
279
280                 dmaengine_submit(sirfport->tx_dma_desc);
281                 dma_async_issue_pending(sirfport->tx_dma_chan);
282                 sirfport->tx_dma_state = TX_DMA_RUNNING;
283         }
284 }
285
286 static void sirfsoc_uart_start_tx(struct uart_port *port)
287 {
288         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
289         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
290         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
291         if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
292                 sirfsoc_uart_tx_with_dma(sirfport);
293         else {
294                 sirfsoc_uart_pio_tx_chars(sirfport, 1);
295                 wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
296                 if (!sirfport->is_marco)
297                         wr_regl(port, ureg->sirfsoc_int_en_reg,
298                                         rd_regl(port, ureg->sirfsoc_int_en_reg)|
299                                         uint_en->sirfsoc_txfifo_empty_en);
300                 else
301                         wr_regl(port, ureg->sirfsoc_int_en_reg,
302                                         uint_en->sirfsoc_txfifo_empty_en);
303         }
304 }
305
306 static void sirfsoc_uart_stop_rx(struct uart_port *port)
307 {
308         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
309         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
310         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
311
312         wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
313         if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
314                 if (!sirfport->is_marco)
315                         wr_regl(port, ureg->sirfsoc_int_en_reg,
316                                 rd_regl(port, ureg->sirfsoc_int_en_reg) &
317                                 ~(SIRFUART_RX_DMA_INT_EN(port, uint_en) |
318                                 uint_en->sirfsoc_rx_done_en));
319                 else
320                         wr_regl(port, SIRFUART_INT_EN_CLR,
321                                         SIRFUART_RX_DMA_INT_EN(port, uint_en)|
322                                         uint_en->sirfsoc_rx_done_en);
323                 dmaengine_terminate_all(sirfport->rx_dma_chan);
324         } else {
325                 if (!sirfport->is_marco)
326                         wr_regl(port, ureg->sirfsoc_int_en_reg,
327                                 rd_regl(port, ureg->sirfsoc_int_en_reg)&
328                                 ~(SIRFUART_RX_IO_INT_EN(port, uint_en)));
329                 else
330                         wr_regl(port, SIRFUART_INT_EN_CLR,
331                                         SIRFUART_RX_IO_INT_EN(port, uint_en));
332         }
333 }
334
335 static void sirfsoc_uart_disable_ms(struct uart_port *port)
336 {
337         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
338         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
339         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
340
341         if (!sirfport->hw_flow_ctrl)
342                 return;
343         sirfport->ms_enabled = false;
344         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
345                 wr_regl(port, ureg->sirfsoc_afc_ctrl,
346                                 rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0x3FF);
347                 if (!sirfport->is_marco)
348                         wr_regl(port, ureg->sirfsoc_int_en_reg,
349                                         rd_regl(port, ureg->sirfsoc_int_en_reg)&
350                                         ~uint_en->sirfsoc_cts_en);
351                 else
352                         wr_regl(port, SIRFUART_INT_EN_CLR,
353                                         uint_en->sirfsoc_cts_en);
354         } else
355                 disable_irq(gpio_to_irq(sirfport->cts_gpio));
356 }
357
358 static irqreturn_t sirfsoc_uart_usp_cts_handler(int irq, void *dev_id)
359 {
360         struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
361         struct uart_port *port = &sirfport->port;
362         if (gpio_is_valid(sirfport->cts_gpio) && sirfport->ms_enabled)
363                 uart_handle_cts_change(port,
364                                 !gpio_get_value(sirfport->cts_gpio));
365         return IRQ_HANDLED;
366 }
367
368 static void sirfsoc_uart_enable_ms(struct uart_port *port)
369 {
370         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
371         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
372         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
373
374         if (!sirfport->hw_flow_ctrl)
375                 return;
376         sirfport->ms_enabled = true;
377         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
378                 wr_regl(port, ureg->sirfsoc_afc_ctrl,
379                                 rd_regl(port, ureg->sirfsoc_afc_ctrl) |
380                                 SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN);
381                 if (!sirfport->is_marco)
382                         wr_regl(port, ureg->sirfsoc_int_en_reg,
383                                         rd_regl(port, ureg->sirfsoc_int_en_reg)
384                                         | uint_en->sirfsoc_cts_en);
385                 else
386                         wr_regl(port, ureg->sirfsoc_int_en_reg,
387                                         uint_en->sirfsoc_cts_en);
388         } else
389                 enable_irq(gpio_to_irq(sirfport->cts_gpio));
390 }
391
392 static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state)
393 {
394         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
395         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
396         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
397                 unsigned long ulcon = rd_regl(port, ureg->sirfsoc_line_ctrl);
398                 if (break_state)
399                         ulcon |= SIRFUART_SET_BREAK;
400                 else
401                         ulcon &= ~SIRFUART_SET_BREAK;
402                 wr_regl(port, ureg->sirfsoc_line_ctrl, ulcon);
403         }
404 }
405
406 static unsigned int
407 sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
408 {
409         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
410         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
411         struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
412         unsigned int ch, rx_count = 0;
413         struct tty_struct *tty;
414         tty = tty_port_tty_get(&port->state->port);
415         if (!tty)
416                 return 0;
417         while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
418                                         ufifo_st->ff_empty(port->line))) {
419                 ch = rd_regl(port, ureg->sirfsoc_rx_fifo_data) |
420                         SIRFUART_DUMMY_READ;
421                 if (unlikely(uart_handle_sysrq_char(port, ch)))
422                         continue;
423                 uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
424                 rx_count++;
425                 if (rx_count >= max_rx_count)
426                         break;
427         }
428
429         sirfport->rx_io_count += rx_count;
430         port->icount.rx += rx_count;
431
432         spin_unlock(&port->lock);
433         tty_flip_buffer_push(&port->state->port);
434         spin_lock(&port->lock);
435
436         return rx_count;
437 }
438
439 static unsigned int
440 sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
441 {
442         struct uart_port *port = &sirfport->port;
443         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
444         struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
445         struct circ_buf *xmit = &port->state->xmit;
446         unsigned int num_tx = 0;
447         while (!uart_circ_empty(xmit) &&
448                 !(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
449                                         ufifo_st->ff_full(port->line)) &&
450                 count--) {
451                 wr_regl(port, ureg->sirfsoc_tx_fifo_data,
452                                 xmit->buf[xmit->tail]);
453                 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
454                 port->icount.tx++;
455                 num_tx++;
456         }
457         if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
458                 uart_write_wakeup(port);
459         return num_tx;
460 }
461
462 static void sirfsoc_uart_tx_dma_complete_callback(void *param)
463 {
464         struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
465         struct uart_port *port = &sirfport->port;
466         struct circ_buf *xmit = &port->state->xmit;
467         unsigned long flags;
468
469         xmit->tail = (xmit->tail + sirfport->transfer_size) &
470                                 (UART_XMIT_SIZE - 1);
471         port->icount.tx += sirfport->transfer_size;
472         if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
473                 uart_write_wakeup(port);
474         if (sirfport->tx_dma_addr)
475                 dma_unmap_single(port->dev, sirfport->tx_dma_addr,
476                                 sirfport->transfer_size, DMA_TO_DEVICE);
477         spin_lock_irqsave(&sirfport->tx_lock, flags);
478         sirfport->tx_dma_state = TX_DMA_IDLE;
479         sirfsoc_uart_tx_with_dma(sirfport);
480         spin_unlock_irqrestore(&sirfport->tx_lock, flags);
481 }
482
483 static void sirfsoc_uart_insert_rx_buf_to_tty(
484                 struct sirfsoc_uart_port *sirfport, int count)
485 {
486         struct uart_port *port = &sirfport->port;
487         struct tty_port *tport = &port->state->port;
488         int inserted;
489
490         inserted = tty_insert_flip_string(tport,
491                 sirfport->rx_dma_items[sirfport->rx_completed].xmit.buf, count);
492         port->icount.rx += inserted;
493         tty_flip_buffer_push(tport);
494 }
495
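/*
 * Reset the circular-buffer indices of RX loop buffer 'index' and queue a
 * fresh DMA descriptor for it, with the completion callback wired to
 * sirfsoc_uart_rx_dma_complete_callback().
 */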
496 static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
497 {
498         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
499
500         sirfport->rx_dma_items[index].xmit.tail =
501                 sirfport->rx_dma_items[index].xmit.head = 0;
502         sirfport->rx_dma_items[index].desc =
503                 dmaengine_prep_slave_single(sirfport->rx_dma_chan,
504                 sirfport->rx_dma_items[index].dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
505                 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
506         if (!sirfport->rx_dma_items[index].desc) {
507                 dev_err(port->dev, "DMA prep slave single fail\n");
508                 return;
509         }
510         sirfport->rx_dma_items[index].desc->callback =
511                 sirfsoc_uart_rx_dma_complete_callback;
512         sirfport->rx_dma_items[index].desc->callback_param = sirfport;
513         sirfport->rx_dma_items[index].cookie =
514                 dmaengine_submit(sirfport->rx_dma_items[index].desc);
515         dma_async_issue_pending(sirfport->rx_dma_chan);
516 }
517
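/*
 * Tasklet run after an RX timeout interrupt: push all completed DMA loop
 * buffers plus the partially filled one to the tty, switch the RX path to
 * I/O (PIO) mode to drain the last few characters, and re-arm DMA once four
 * characters have been read by PIO (otherwise the RX done interrupt is left
 * enabled to finish the job).
 */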
518 static void sirfsoc_rx_tmo_process_tl(unsigned long param)
519 {
520         struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
521         struct uart_port *port = &sirfport->port;
522         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
523         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
524         struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
525         unsigned int count;
526         unsigned long flags;
527         struct dma_tx_state tx_state;
528
529         spin_lock_irqsave(&sirfport->rx_lock, flags);
530         while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
531                 sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
532                 sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
533                                         SIRFSOC_RX_DMA_BUF_SIZE);
534                 sirfport->rx_completed++;
535                 sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
536         }
537         count = CIRC_CNT(sirfport->rx_dma_items[sirfport->rx_issued].xmit.head,
538                 sirfport->rx_dma_items[sirfport->rx_issued].xmit.tail,
539                 SIRFSOC_RX_DMA_BUF_SIZE);
540         if (count > 0)
541                 sirfsoc_uart_insert_rx_buf_to_tty(sirfport, count);
542         wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
543                         rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
544                         SIRFUART_IO_MODE);
545         spin_unlock_irqrestore(&sirfport->rx_lock, flags);
546         spin_lock(&port->lock);
547         sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
548         spin_unlock(&port->lock);
549         if (sirfport->rx_io_count == 4) {
550                 spin_lock_irqsave(&sirfport->rx_lock, flags);
551                 sirfport->rx_io_count = 0;
552                 wr_regl(port, ureg->sirfsoc_int_st_reg,
553                                 uint_st->sirfsoc_rx_done);
554                 if (!sirfport->is_marco)
555                         wr_regl(port, ureg->sirfsoc_int_en_reg,
556                                 rd_regl(port, ureg->sirfsoc_int_en_reg) &
557                                 ~(uint_en->sirfsoc_rx_done_en));
558                 else
559                         wr_regl(port, SIRFUART_INT_EN_CLR,
560                                         uint_en->sirfsoc_rx_done_en);
561                 spin_unlock_irqrestore(&sirfport->rx_lock, flags);
562
563                 sirfsoc_uart_start_next_rx_dma(port);
564         } else {
565                 spin_lock_irqsave(&sirfport->rx_lock, flags);
566                 wr_regl(port, ureg->sirfsoc_int_st_reg,
567                                 uint_st->sirfsoc_rx_done);
568                 if (!sirfport->is_marco)
569                         wr_regl(port, ureg->sirfsoc_int_en_reg,
570                                 rd_regl(port, ureg->sirfsoc_int_en_reg) |
571                                 (uint_en->sirfsoc_rx_done_en));
572                 else
573                         wr_regl(port, ureg->sirfsoc_int_en_reg,
574                                         uint_en->sirfsoc_rx_done_en);
575                 spin_unlock_irqrestore(&sirfport->rx_lock, flags);
576         }
577 }
578
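/*
 * Called from the ISR on an RX timeout: record how much of the in-flight
 * descriptor completed (via the DMA residue), stop the RX DMA channel, mask
 * the timeout interrupt and defer the rest to the rx_tmo_process tasklet.
 */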
579 static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
580 {
581         struct uart_port *port = &sirfport->port;
582         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
583         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
584         struct dma_tx_state tx_state;
585         spin_lock(&sirfport->rx_lock);
586
587         dmaengine_tx_status(sirfport->rx_dma_chan,
588                 sirfport->rx_dma_items[sirfport->rx_issued].cookie, &tx_state);
589         dmaengine_terminate_all(sirfport->rx_dma_chan);
590         sirfport->rx_dma_items[sirfport->rx_issued].xmit.head =
591                 SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
592         if (!sirfport->is_marco)
593                 wr_regl(port, ureg->sirfsoc_int_en_reg,
594                         rd_regl(port, ureg->sirfsoc_int_en_reg) &
595                         ~(uint_en->sirfsoc_rx_timeout_en));
596         else
597                 wr_regl(port, SIRFUART_INT_EN_CLR,
598                                 uint_en->sirfsoc_rx_timeout_en);
599         spin_unlock(&sirfport->rx_lock);
600         tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
601 }
602
603 static void sirfsoc_uart_handle_rx_done(struct sirfsoc_uart_port *sirfport)
604 {
605         struct uart_port *port = &sirfport->port;
606         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
607         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
608         struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
609
610         sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
611         if (sirfport->rx_io_count == 4) {
612                 sirfport->rx_io_count = 0;
613                 if (!sirfport->is_marco)
614                         wr_regl(port, ureg->sirfsoc_int_en_reg,
615                                 rd_regl(port, ureg->sirfsoc_int_en_reg) &
616                                 ~(uint_en->sirfsoc_rx_done_en));
617                 else
618                         wr_regl(port, SIRFUART_INT_EN_CLR,
619                                         uint_en->sirfsoc_rx_done_en);
620                 wr_regl(port, ureg->sirfsoc_int_st_reg,
621                                 uint_st->sirfsoc_rx_timeout);
622                 sirfsoc_uart_start_next_rx_dma(port);
623         }
624 }
625
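/*
 * Main interrupt handler: acknowledge the raised interrupts, then handle in
 * turn line errors (break/overrun/framing/parity), CTS changes, received
 * data (DMA timeout/done events or PIO) and the TX FIFO empty condition.
 */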
626 static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
627 {
628         unsigned long intr_status;
629         unsigned long cts_status;
630         unsigned long flag = TTY_NORMAL;
631         struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
632         struct uart_port *port = &sirfport->port;
633         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
634         struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
635         struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
636         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
637         struct uart_state *state = port->state;
638         struct circ_buf *xmit = &port->state->xmit;
639         spin_lock(&port->lock);
640         intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
641         wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);
642         intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
643         if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(port, uint_st)))) {
644                 if (intr_status & uint_st->sirfsoc_rxd_brk) {
645                         port->icount.brk++;
646                         if (uart_handle_break(port))
647                                 goto recv_char;
648                 }
649                 if (intr_status & uint_st->sirfsoc_rx_oflow)
650                         port->icount.overrun++;
651                 if (intr_status & uint_st->sirfsoc_frm_err) {
652                         port->icount.frame++;
653                         flag = TTY_FRAME;
654                 }
655                 if (intr_status & uint_st->sirfsoc_parity_err)
656                         flag = TTY_PARITY;
657                 wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
658                 wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
659                 wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
660                 intr_status &= port->read_status_mask;
661                 uart_insert_char(port, intr_status,
662                                         uint_en->sirfsoc_rx_oflow_en, 0, flag);
663                 tty_flip_buffer_push(&state->port);
664         }
665 recv_char:
666         if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
667                         (intr_status & SIRFUART_CTS_INT_ST(uint_st)) &&
668                         !sirfport->tx_dma_state) {
669                 cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) &
670                                         SIRFUART_AFC_CTS_STATUS;
671                 if (cts_status != 0)
672                         cts_status = 0;
673                 else
674                         cts_status = 1;
675                 uart_handle_cts_change(port, cts_status);
676                 wake_up_interruptible(&state->port.delta_msr_wait);
677         }
678         if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
679                 if (intr_status & uint_st->sirfsoc_rx_timeout)
680                         sirfsoc_uart_handle_rx_tmo(sirfport);
681                 if (intr_status & uint_st->sirfsoc_rx_done)
682                         sirfsoc_uart_handle_rx_done(sirfport);
683         } else {
684                 if (intr_status & SIRFUART_RX_IO_INT_ST(uint_st))
685                         sirfsoc_uart_pio_rx_chars(port,
686                                         SIRFSOC_UART_IO_RX_MAX_CNT);
687         }
688         if (intr_status & uint_st->sirfsoc_txfifo_empty) {
689                 if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
690                         sirfsoc_uart_tx_with_dma(sirfport);
691                 else {
692                         if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
693                                 spin_unlock(&port->lock);
694                                 return IRQ_HANDLED;
695                         } else {
696                                 sirfsoc_uart_pio_tx_chars(sirfport,
697                                         SIRFSOC_UART_IO_TX_REASONABLE_CNT);
698                                 if ((uart_circ_empty(xmit)) &&
699                                 (rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
700                                 ufifo_st->ff_empty(port->line)))
701                                         sirfsoc_uart_stop_tx(port);
702                         }
703                 }
704         }
705         spin_unlock(&port->lock);
706         return IRQ_HANDLED;
707 }
708
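/*
 * Tasklet run when an RX DMA descriptor completes: push each finished loop
 * buffer to the tty and, as long as the RX timeout interrupt is still
 * enabled (i.e. timeout handling has not taken over), immediately resubmit
 * the buffer so the ring stays full.
 */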
709 static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
710 {
711         struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
712         struct uart_port *port = &sirfport->port;
713         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
714         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
715         unsigned long flags;
716         struct dma_tx_state tx_state;
717         spin_lock_irqsave(&sirfport->rx_lock, flags);
718         while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
719                         sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
720                 sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
721                                         SIRFSOC_RX_DMA_BUF_SIZE);
722                 if (rd_regl(port, ureg->sirfsoc_int_en_reg) &
723                                 uint_en->sirfsoc_rx_timeout_en)
724                         sirfsoc_rx_submit_one_dma_desc(port,
725                                         sirfport->rx_completed++);
726                 else
727                         sirfport->rx_completed++;
728                 sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
729         }
730         spin_unlock_irqrestore(&sirfport->rx_lock, flags);
731 }
732
733 static void sirfsoc_uart_rx_dma_complete_callback(void *param)
734 {
735         struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
736         spin_lock(&sirfport->rx_lock);
737         sirfport->rx_issued++;
738         sirfport->rx_issued %= SIRFSOC_RX_LOOP_BUF_CNT;
739         spin_unlock(&sirfport->rx_lock);
740         tasklet_schedule(&sirfport->rx_dma_complete_tasklet);
741 }
742
743 /* submit RX DMA descriptors for all loop buffers to the dmaengine */
744 static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
745 {
746         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
747         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
748         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
749         unsigned long flags;
750         int i;
751         spin_lock_irqsave(&sirfport->rx_lock, flags);
752         sirfport->rx_io_count = 0;
753         wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
754                 rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
755                 ~SIRFUART_IO_MODE);
756         spin_unlock_irqrestore(&sirfport->rx_lock, flags);
757         for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
758                 sirfsoc_rx_submit_one_dma_desc(port, i);
759         sirfport->rx_completed = sirfport->rx_issued = 0;
760         spin_lock_irqsave(&sirfport->rx_lock, flags);
761         if (!sirfport->is_marco)
762                 wr_regl(port, ureg->sirfsoc_int_en_reg,
763                                 rd_regl(port, ureg->sirfsoc_int_en_reg) |
764                                 SIRFUART_RX_DMA_INT_EN(port, uint_en));
765         else
766                 wr_regl(port, ureg->sirfsoc_int_en_reg,
767                         SIRFUART_RX_DMA_INT_EN(port, uint_en));
768         spin_unlock_irqrestore(&sirfport->rx_lock, flags);
769 }
770
771 static void sirfsoc_uart_start_rx(struct uart_port *port)
772 {
773         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
774         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
775         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
776
777         sirfport->rx_io_count = 0;
778         wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
779         wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
780         wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
781         if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
782                 sirfsoc_uart_start_next_rx_dma(port);
783         else {
784                 if (!sirfport->is_marco)
785                         wr_regl(port, ureg->sirfsoc_int_en_reg,
786                                 rd_regl(port, ureg->sirfsoc_int_en_reg) |
787                                 SIRFUART_RX_IO_INT_EN(port, uint_en));
788                 else
789                         wr_regl(port, ureg->sirfsoc_int_en_reg,
790                                 SIRFUART_RX_IO_INT_EN(port, uint_en));
791         }
792 }
793
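/*
 * Brute-force search for the USP clock divisor: for each sample divisor try
 * the best matching I/O clock divisor and keep the pair that gets closest
 * to the requested rate.  Returns the I/O clock divisor and stores the
 * winning sample divisor through *sample_reg.
 */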
794 static unsigned int
795 sirfsoc_usp_calc_sample_div(unsigned long set_rate,
796                 unsigned long ioclk_rate, unsigned long *sample_reg)
797 {
798         unsigned long min_delta = ~0UL;
799         unsigned short sample_div;
800         unsigned long ioclk_div = 0;
801         long temp_delta;        /* signed: the delta below may be negative */
802
803         for (sample_div = SIRF_MIN_SAMPLE_DIV;
804                         sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
805                 temp_delta = ioclk_rate -
806                 (ioclk_rate + (set_rate * sample_div) / 2)
807                 / (set_rate * sample_div) * set_rate * sample_div;
808
809                 temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
810                 if (temp_delta < min_delta) {
811                         ioclk_div = (2 * ioclk_rate /
812                                 (set_rate * sample_div) + 1) / 2 - 1;
813                         if (ioclk_div > SIRF_IOCLK_DIV_MAX)
814                                 continue;
815                         min_delta = temp_delta;
816                         *sample_reg = sample_div;
817                         if (!temp_delta)
818                                 break;
819                 }
820         }
821         return ioclk_div;
822 }
823
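/*
 * Same search for the real UART: find the sample/clock divisor pair with
 * the smallest baud rate error, return them packed into the divisor
 * register format and report the actually achieved rate through *set_baud.
 */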
824 static unsigned int
825 sirfsoc_uart_calc_sample_div(unsigned long baud_rate,
826                         unsigned long ioclk_rate, unsigned long *set_baud)
827 {
828         unsigned long min_delta = ~0UL;
829         unsigned short sample_div;
830         unsigned int regv = 0;
831         unsigned long ioclk_div;
832         unsigned long baud_tmp;
833         int temp_delta;
834
835         for (sample_div = SIRF_MIN_SAMPLE_DIV;
836                         sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
837                 ioclk_div = (ioclk_rate / (baud_rate * (sample_div + 1))) - 1;
838                 if (ioclk_div > SIRF_IOCLK_DIV_MAX)
839                         continue;
840                 baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1));
841                 temp_delta = baud_tmp - baud_rate;
842                 temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
843                 if (temp_delta < min_delta) {
844                         regv = regv & (~SIRF_IOCLK_DIV_MASK);
845                         regv = regv | ioclk_div;
846                         regv = regv & (~SIRF_SAMPLE_DIV_MASK);
847                         regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT);
848                         min_delta = temp_delta;
849                         *set_baud = baud_tmp;
850                 }
851         }
852         return regv;
853 }
854
855 static void sirfsoc_uart_set_termios(struct uart_port *port,
856                                        struct ktermios *termios,
857                                        struct ktermios *old)
858 {
859         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
860         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
861         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
862         unsigned long   config_reg = 0;
863         unsigned long   baud_rate;
864         unsigned long   set_baud;
865         unsigned long   flags;
866         unsigned long   ic;
867         unsigned int    clk_div_reg = 0;
868         unsigned long   txfifo_op_reg, ioclk_rate;
869         unsigned long   rx_time_out;
870         int             threshold_div;
871         u32             data_bit_len, stop_bit_len, len_val;
872         unsigned long   sample_div_reg = 0xf;
873         ioclk_rate      = port->uartclk;
874
875         switch (termios->c_cflag & CSIZE) {
876         default:
877         case CS8:
878                 data_bit_len = 8;
879                 config_reg |= SIRFUART_DATA_BIT_LEN_8;
880                 break;
881         case CS7:
882                 data_bit_len = 7;
883                 config_reg |= SIRFUART_DATA_BIT_LEN_7;
884                 break;
885         case CS6:
886                 data_bit_len = 6;
887                 config_reg |= SIRFUART_DATA_BIT_LEN_6;
888                 break;
889         case CS5:
890                 data_bit_len = 5;
891                 config_reg |= SIRFUART_DATA_BIT_LEN_5;
892                 break;
893         }
894         if (termios->c_cflag & CSTOPB) {
895                 config_reg |= SIRFUART_STOP_BIT_LEN_2;
896                 stop_bit_len = 2;
897         } else
898                 stop_bit_len = 1;
899
900         spin_lock_irqsave(&port->lock, flags);
901         port->read_status_mask = uint_en->sirfsoc_rx_oflow_en;
902         port->ignore_status_mask = 0;
903         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
904                 if (termios->c_iflag & INPCK)
905                         port->read_status_mask |= uint_en->sirfsoc_frm_err_en |
906                                 uint_en->sirfsoc_parity_err_en;
907         } else {
908                 if (termios->c_iflag & INPCK)
909                         port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
910         }
911         if (termios->c_iflag & (BRKINT | PARMRK))
912                         port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
913         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
914                 if (termios->c_iflag & IGNPAR)
915                         port->ignore_status_mask |=
916                                 uint_en->sirfsoc_frm_err_en |
917                                 uint_en->sirfsoc_parity_err_en;
918                 if (termios->c_cflag & PARENB) {
919                         if (termios->c_cflag & CMSPAR) {
920                                 if (termios->c_cflag & PARODD)
921                                         config_reg |= SIRFUART_STICK_BIT_MARK;
922                                 else
923                                         config_reg |= SIRFUART_STICK_BIT_SPACE;
924                         } else if (termios->c_cflag & PARODD) {
925                                 config_reg |= SIRFUART_STICK_BIT_ODD;
926                         } else {
927                                 config_reg |= SIRFUART_STICK_BIT_EVEN;
928                         }
929                 }
930         } else {
931                 if (termios->c_iflag & IGNPAR)
932                         port->ignore_status_mask |=
933                                 uint_en->sirfsoc_frm_err_en;
934                 if (termios->c_cflag & PARENB)
935                         dev_warn(port->dev,
936                                         "USP-UART does not support parity\n");
937         }
938         if (termios->c_iflag & IGNBRK) {
939                 port->ignore_status_mask |=
940                         uint_en->sirfsoc_rxd_brk_en;
941                 if (termios->c_iflag & IGNPAR)
942                         port->ignore_status_mask |=
943                                 uint_en->sirfsoc_rx_oflow_en;
944         }
945         if ((termios->c_cflag & CREAD) == 0)
946                 port->ignore_status_mask |= SIRFUART_DUMMY_READ;
947         /* Hardware Flow Control Settings */
948         if (UART_ENABLE_MS(port, termios->c_cflag)) {
949                 if (!sirfport->ms_enabled)
950                         sirfsoc_uart_enable_ms(port);
951         } else {
952                 if (sirfport->ms_enabled)
953                         sirfsoc_uart_disable_ms(port);
954         }
955         baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000);
956         if (ioclk_rate == 150000000) {
957                 for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
958                         if (baud_rate == baudrate_to_regv[ic].baud_rate)
959                                 clk_div_reg = baudrate_to_regv[ic].reg_val;
960         }
961         set_baud = baud_rate;
962         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
963                 if (unlikely(clk_div_reg == 0))
964                         clk_div_reg = sirfsoc_uart_calc_sample_div(baud_rate,
965                                         ioclk_rate, &set_baud);
966                 wr_regl(port, ureg->sirfsoc_divisor, clk_div_reg);
967         } else {
968                 clk_div_reg = sirfsoc_usp_calc_sample_div(baud_rate,
969                                 ioclk_rate, &sample_div_reg);
970                 sample_div_reg--;
971                 set_baud = ((ioclk_rate / (clk_div_reg+1) - 1) /
972                                 (sample_div_reg + 1));
973                 /* setting usp mode 2 */
974                 len_val = ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET) |
975                                 (1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET));
976                 len_val |= ((clk_div_reg & SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK)
977                                 << SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET);
978                 wr_regl(port, ureg->sirfsoc_mode2, len_val);
979         }
980         if (tty_termios_baud_rate(termios))
981                 tty_termios_encode_baud_rate(termios, set_baud, set_baud);
982         /* set receive timeout and data bit length */
983         rx_time_out = SIRFSOC_UART_RX_TIMEOUT(set_baud, 20000);
984         rx_time_out = SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out);
985         txfifo_op_reg = rd_regl(port, ureg->sirfsoc_tx_fifo_op);
986         wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_STOP);
987         wr_regl(port, ureg->sirfsoc_tx_fifo_op,
988                         (txfifo_op_reg & ~SIRFUART_FIFO_START));
989         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
990                 config_reg |= SIRFUART_RECV_TIMEOUT(port, rx_time_out);
991                 wr_regl(port, ureg->sirfsoc_line_ctrl, config_reg);
992         } else {
993                 /* tx frame ctrl */
994                 len_val = (data_bit_len - 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET;
995                 len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
996                                 SIRFSOC_USP_TX_FRAME_LEN_OFFSET;
997                 len_val |= ((data_bit_len - 1) <<
998                                 SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET);
999                 len_val |= (((clk_div_reg & 0xc00) >> 10) <<
1000                                 SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET);
1001                 wr_regl(port, ureg->sirfsoc_tx_frame_ctrl, len_val);
1002                 /* rx frame ctrl */
1003                 len_val = (data_bit_len - 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET;
1004                 len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
1005                                 SIRFSOC_USP_RX_FRAME_LEN_OFFSET;
1006                 len_val |= (data_bit_len - 1) <<
1007                                 SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET;
1008                 len_val |= (((clk_div_reg & 0xf000) >> 12) <<
1009                                 SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET);
1010                 wr_regl(port, ureg->sirfsoc_rx_frame_ctrl, len_val);
1011                 /* async param */
1012                 wr_regl(port, ureg->sirfsoc_async_param_reg,
1013                         (SIRFUART_RECV_TIMEOUT(port, rx_time_out)) |
1014                         (sample_div_reg & SIRFSOC_USP_ASYNC_DIV2_MASK) <<
1015                         SIRFSOC_USP_ASYNC_DIV2_OFFSET);
1016         }
1017         if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
1018                 wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_DMA_MODE);
1019         else
1020                 wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
1021         if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
1022                 wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
1023         else
1024                 wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
1025         /* Reset Rx/Tx FIFO Threshold level for proper baudrate */
1026         if (set_baud < 1000000)
1027                 threshold_div = 1;
1028         else
1029                 threshold_div = 2;
1030         wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl,
1031                                 SIRFUART_FIFO_THD(port) / threshold_div);
1032         wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl,
1033                                 SIRFUART_FIFO_THD(port) / threshold_div);
1034         txfifo_op_reg |= SIRFUART_FIFO_START;
1035         wr_regl(port, ureg->sirfsoc_tx_fifo_op, txfifo_op_reg);
1036         uart_update_timeout(port, termios->c_cflag, set_baud);
1037         sirfsoc_uart_start_rx(port);
1038         wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_TX_EN | SIRFUART_RX_EN);
1039         spin_unlock_irqrestore(&port->lock, flags);
1040 }
1041
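/*
 * Power management hook: gate the port clock on and off according to the
 * requested power state (state 0 means fully powered).
 */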
1042 static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state,
1043                               unsigned int oldstate)
1044 {
1045         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1046         if (!state)
1047                 clk_prepare_enable(sirfport->clk);
1048         else
1049                 clk_disable_unprepare(sirfport->clk);
1050 }
1051
1052 static int sirfsoc_uart_init_tx_dma(struct uart_port *port)
1053 {
1054         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1055         dma_cap_mask_t dma_mask;
1056         struct dma_slave_config tx_slv_cfg = {
1057                 .dst_maxburst = 2,
1058         };
1059
1060         dma_cap_zero(dma_mask);
1061         dma_cap_set(DMA_SLAVE, dma_mask);
1062         sirfport->tx_dma_chan = dma_request_channel(dma_mask,
1063                 (dma_filter_fn)sirfsoc_dma_filter_id,
1064                 (void *)sirfport->tx_dma_no);
1065         if (!sirfport->tx_dma_chan) {
1066                 dev_err(port->dev, "Uart Request Dma Channel Fail %d\n",
1067                                         sirfport->tx_dma_no);
1068                 return  -EPROBE_DEFER;
1069         }
1070         dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
1071
1072         return 0;
1073 }
1074
1075 static int sirfsoc_uart_init_rx_dma(struct uart_port *port)
1076 {
1077         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1078         dma_cap_mask_t dma_mask;
1079         int ret;
1080         int i, j;
1081         struct dma_slave_config slv_cfg = {
1082                 .src_maxburst = 2,
1083         };
1084
1085         dma_cap_zero(dma_mask);
1086         dma_cap_set(DMA_SLAVE, dma_mask);
1087         sirfport->rx_dma_chan = dma_request_channel(dma_mask,
1088                                         (dma_filter_fn)sirfsoc_dma_filter_id,
1089                                         (void *)sirfport->rx_dma_no);
1090         if (!sirfport->rx_dma_chan) {
1091                 dev_err(port->dev, "Uart Request Dma Channel Fail %d\n",
1092                                 sirfport->rx_dma_no);
1093                 ret = -EPROBE_DEFER;
1094                 goto request_err;
1095         }
1096         for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
1097                 sirfport->rx_dma_items[i].xmit.buf =
1098                         dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
1099                         &sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
1100                 if (!sirfport->rx_dma_items[i].xmit.buf) {
1101                         dev_err(port->dev, "Uart alloc buffer failed\n");
1102                         ret = -ENOMEM;
1103                         goto alloc_coherent_err;
1104                 }
1105                 sirfport->rx_dma_items[i].xmit.head =
1106                         sirfport->rx_dma_items[i].xmit.tail = 0;
1107         }
1108         dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
1109
1110         return 0;
1111 alloc_coherent_err:
1112         for (j = 0; j < i; j++)
1113                 dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
1114                                 sirfport->rx_dma_items[j].xmit.buf,
1115                                 sirfport->rx_dma_items[j].dma_addr);
1116         dma_release_channel(sirfport->rx_dma_chan);
1117 request_err:
1118         return ret;
1119 }
1120
1121 static void sirfsoc_uart_uninit_tx_dma(struct sirfsoc_uart_port *sirfport)
1122 {
1123         dmaengine_terminate_all(sirfport->tx_dma_chan);
1124         dma_release_channel(sirfport->tx_dma_chan);
1125 }
1126
1127 static void sirfsoc_uart_uninit_rx_dma(struct sirfsoc_uart_port *sirfport)
1128 {
1129         int i;
1130         struct uart_port *port = &sirfport->port;
1131         dmaengine_terminate_all(sirfport->rx_dma_chan);
1132         dma_release_channel(sirfport->rx_dma_chan);
1133         for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
1134                 dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
1135                                 sirfport->rx_dma_items[i].xmit.buf,
1136                                 sirfport->rx_dma_items[i].dma_addr);
1137 }
1138
1139 static int sirfsoc_uart_startup(struct uart_port *port)
1140 {
1141         struct sirfsoc_uart_port *sirfport      = to_sirfport(port);
1142         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
1143         unsigned int index                      = port->line;
1144         int ret;
1145         set_irq_flags(port->irq, IRQF_VALID | IRQF_NOAUTOEN);
1146         ret = request_irq(port->irq,
1147                                 sirfsoc_uart_isr,
1148                                 0,
1149                                 SIRFUART_PORT_NAME,
1150                                 sirfport);
1151         if (ret != 0) {
1152                 dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n",
1153                                                         index, port->irq);
1154                 goto irq_err;
1155         }
1156
1157         /* initial hardware settings */
1158         wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
1159                 rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) |
1160                 SIRFUART_IO_MODE);
1161         wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
1162                 rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
1163                 SIRFUART_IO_MODE);
1164         wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0);
1165         wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0);
1166         wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN);
1167         if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
1168                 wr_regl(port, ureg->sirfsoc_mode1,
1169                         SIRFSOC_USP_ENDIAN_CTRL_LSBF |
1170                         SIRFSOC_USP_EN);
1171         wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_RESET);
1172         wr_regl(port, ureg->sirfsoc_tx_fifo_op, 0);
1173         wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
1174         wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
1175         wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
1176         wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
1177
1178         if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
1179                 ret = sirfsoc_uart_init_rx_dma(port);
1180                 if (ret)
1181                         goto init_rx_err;
1182                 wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
1183                                 SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
1184                                 SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
1185                                 SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
1186         }
1187         if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
1188                 sirfsoc_uart_init_tx_dma(port);
1189                 sirfport->tx_dma_state = TX_DMA_IDLE;
1190                 wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
1191                                 SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) |
1192                                 SIRFUART_TX_FIFO_CHK_LC(port->line, 0xe) |
1193                                 SIRFUART_TX_FIFO_CHK_HC(port->line, 0x4));
1194         }
1195         sirfport->ms_enabled = false;
1196         if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
1197                 sirfport->hw_flow_ctrl) {
1198                 set_irq_flags(gpio_to_irq(sirfport->cts_gpio),
1199                         IRQF_VALID | IRQF_NOAUTOEN);
1200                 ret = request_irq(gpio_to_irq(sirfport->cts_gpio),
1201                         sirfsoc_uart_usp_cts_handler, IRQF_TRIGGER_FALLING |
1202                         IRQF_TRIGGER_RISING, "usp_cts_irq", sirfport);
1203                 if (ret != 0) {
1204                         dev_err(port->dev, "UART-USP: request gpio irq fail\n");
                             /* undo the DMA setup done above before bailing out */
                             if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
                                     sirfsoc_uart_uninit_rx_dma(sirfport);
                             if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
                                     sirfsoc_uart_uninit_tx_dma(sirfport);
1205                         goto init_rx_err;
1206                 }
1207         }
1208
1209         enable_irq(port->irq);
1210
1211         return 0;
1212 init_rx_err:
1213         free_irq(port->irq, sirfport);
1214 irq_err:
1215         return ret;
1216 }
1217
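/*
 * sirfsoc_uart_shutdown - mask the port's interrupts, free the UART irq (and
 * the CTS gpio irq used for USP flow control), then undo any RX/TX DMA setup
 * done in sirfsoc_uart_startup().
 */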
1218 static void sirfsoc_uart_shutdown(struct uart_port *port)
1219 {
1220         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1221         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
1222         if (!sirfport->is_marco)
1223                 wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
1224         else
1225                 wr_regl(port, SIRFUART_INT_EN_CLR, ~0UL);
1226
1227         free_irq(port->irq, sirfport);
1228         if (sirfport->ms_enabled)
1229                 sirfsoc_uart_disable_ms(port);
1230         if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
1231                         sirfport->hw_flow_ctrl) {
1232                 gpio_set_value(sirfport->rts_gpio, 1);
1233                 free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
1234         }
1235         if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
1236                 sirfsoc_uart_uninit_rx_dma(sirfport);
1237         if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
1238                 sirfsoc_uart_uninit_tx_dma(sirfport);
1239                 sirfport->tx_dma_state = TX_DMA_IDLE;
1240         }
1241 }
1242
1243 static const char *sirfsoc_uart_type(struct uart_port *port)
1244 {
1245         return port->type == SIRFSOC_PORT_TYPE ? SIRFUART_PORT_NAME : NULL;
1246 }
1247
1248 static int sirfsoc_uart_request_port(struct uart_port *port)
1249 {
1250         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1251         struct sirfsoc_uart_param *uart_param = &sirfport->uart_reg->uart_param;
1252         void *ret;
1253         ret = request_mem_region(port->mapbase,
1254                 SIRFUART_MAP_SIZE, uart_param->port_name);
1255         return ret ? 0 : -EBUSY;
1256 }
1257
1258 static void sirfsoc_uart_release_port(struct uart_port *port)
1259 {
1260         release_mem_region(port->mapbase, SIRFUART_MAP_SIZE);
1261 }
1262
1263 static void sirfsoc_uart_config_port(struct uart_port *port, int flags)
1264 {
1265         if (flags & UART_CONFIG_TYPE) {
1266                 port->type = SIRFSOC_PORT_TYPE;
1267                 sirfsoc_uart_request_port(port);
1268         }
1269 }
1270
1271 static const struct uart_ops sirfsoc_uart_ops = {
1272         .tx_empty       = sirfsoc_uart_tx_empty,
1273         .get_mctrl      = sirfsoc_uart_get_mctrl,
1274         .set_mctrl      = sirfsoc_uart_set_mctrl,
1275         .stop_tx        = sirfsoc_uart_stop_tx,
1276         .start_tx       = sirfsoc_uart_start_tx,
1277         .stop_rx        = sirfsoc_uart_stop_rx,
1278         .enable_ms      = sirfsoc_uart_enable_ms,
1279         .break_ctl      = sirfsoc_uart_break_ctl,
1280         .startup        = sirfsoc_uart_startup,
1281         .shutdown       = sirfsoc_uart_shutdown,
1282         .set_termios    = sirfsoc_uart_set_termios,
1283         .pm             = sirfsoc_uart_pm,
1284         .type           = sirfsoc_uart_type,
1285         .release_port   = sirfsoc_uart_release_port,
1286         .request_port   = sirfsoc_uart_request_port,
1287         .config_port    = sirfsoc_uart_config_port,
1288 };
1289
1290 #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
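/*
 * sirfsoc_uart_console_setup - early console initialisation: validate the
 * console index, make sure the port has already been probed (mapbase set),
 * force PIO mode for console I/O and apply any "console=" options, falling
 * back to 115200n8.
 */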
1291 static int __init
1292 sirfsoc_uart_console_setup(struct console *co, char *options)
1293 {
1294         unsigned int baud = 115200;
1295         unsigned int bits = 8;
1296         unsigned int parity = 'n';
1297         unsigned int flow = 'n';
1298         struct uart_port *port;
1299         struct sirfsoc_uart_port *sirfport;
1300         struct sirfsoc_register *ureg;
1301         if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
1302                 return -EINVAL;
             port = &sirfsoc_uart_ports[co->index].port;
             sirfport = to_sirfport(port);
             ureg = &sirfport->uart_reg->uart_reg;
1303
1304         if (!port->mapbase)
1305                 return -ENODEV;
1306
1307         /* enable usp in mode1 register */
1308         if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
1309                 wr_regl(port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN |
1310                                 SIRFSOC_USP_ENDIAN_CTRL_LSBF);
1311         if (options)
1312                 uart_parse_options(options, &baud, &parity, &bits, &flow);
1313         port->cons = co;
1314
1315         /* the console always does tx/rx transfers in PIO (io) mode, never DMA */
1316         sirfport->rx_dma_no = UNVALID_DMA_CHAN;
1317         sirfport->tx_dma_no = UNVALID_DMA_CHAN;
1318         return uart_set_options(port, co, baud, parity, bits, flow);
1319 }
1320
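/* busy-wait until the TX FIFO has room, then push a single character */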
1321 static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
1322 {
1323         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1324         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
1325         struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
1326         while (rd_regl(port,
1327                 ureg->sirfsoc_tx_fifo_status) & ufifo_st->ff_full(port->line))
1328                 cpu_relax();
1329         wr_regb(port, ureg->sirfsoc_tx_fifo_data, ch);
1330 }
1331
1332 static void sirfsoc_uart_console_write(struct console *co, const char *s,
1333                                                         unsigned int count)
1334 {
1335         struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
1336         uart_console_write(port, s, count, sirfsoc_uart_console_putchar);
1337 }
1338
1339 static struct console sirfsoc_uart_console = {
1340         .name           = SIRFSOC_UART_NAME,
1341         .device         = uart_console_device,
1342         .flags          = CON_PRINTBUFFER,
1343         .index          = -1,
1344         .write          = sirfsoc_uart_console_write,
1345         .setup          = sirfsoc_uart_console_setup,
1346         .data           = &sirfsoc_uart_drv,
1347 };
1348
1349 static int __init sirfsoc_uart_console_init(void)
1350 {
1351         register_console(&sirfsoc_uart_console);
1352         return 0;
1353 }
1354 console_initcall(sirfsoc_uart_console_init);
1355 #endif
1356
1357 static struct uart_driver sirfsoc_uart_drv = {
1358         .owner          = THIS_MODULE,
1359         .driver_name    = SIRFUART_PORT_NAME,
1360         .nr             = SIRFSOC_UART_NR,
1361         .dev_name       = SIRFSOC_UART_NAME,
1362         .major          = SIRFSOC_UART_MAJOR,
1363         .minor          = SIRFSOC_UART_MINOR,
1364 #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
1365         .cons                   = &sirfsoc_uart_console,
1366 #else
1367         .cons                   = NULL,
1368 #endif
1369 };
1370
1371 static const struct of_device_id sirfsoc_uart_ids[] = {
1372         { .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart, },
1373         { .compatible = "sirf,marco-uart", .data = &sirfsoc_uart, },
1374         { .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp, },
1375         {}
1376 };
1377 MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
1378
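/*
 * sirfsoc_uart_probe - bind one UART/USP node: read "cell-index" and
 * "fifosize", pick up the optional DMA channel numbers and, for USP ports
 * with "sirf,uart-has-rtscts", the cts/rts gpios, then map the registers,
 * fetch the IRQ and clock and register the port with the serial core.
 *
 * Illustrative device tree node (property names follow the parsing below;
 * the address, IRQ, channel numbers and sizes are placeholders, so consult
 * the platform's binding document for real values):
 *
 *      uart1: uart@b0060000 {
 *              compatible = "sirf,prima2-uart";
 *              reg = <0xb0060000 0x1000>;
 *              interrupts = <18>;
 *              cell-index = <1>;
 *              fifosize = <128>;
 *              sirf,uart-dma-rx-channel = <6>;
 *              sirf,uart-dma-tx-channel = <7>;
 *      };
 */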
1379 static int sirfsoc_uart_probe(struct platform_device *pdev)
1380 {
1381         struct sirfsoc_uart_port *sirfport;
1382         struct uart_port *port;
1383         struct resource *res;
1384         int ret;
1385         const struct of_device_id *match;
1386
1387         match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node);
1388         if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) {
1389                 dev_err(&pdev->dev,
1390                         "Unable to find cell-index in uart node.\n");
1391                 ret = -EINVAL;
1392                 goto err;
1393         }
1394         if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart"))
1395                 pdev->id += ((struct sirfsoc_uart_register *)
1396                                 match->data)->uart_param.register_uart_nr;
             /* cell-index comes from the DT; keep it inside the port array */
             if (pdev->id < 0 || pdev->id >= SIRFSOC_UART_NR) {
                     dev_err(&pdev->dev, "Invalid cell-index in uart node.\n");
                     ret = -EINVAL;
                     goto err;
             }
1397         sirfport = &sirfsoc_uart_ports[pdev->id];
1398         port = &sirfport->port;
1399         port->dev = &pdev->dev;
1400         port->private_data = sirfport;
1401         sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;
1402
1403         sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node,
1404                 "sirf,uart-has-rtscts");
1405         if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart")) {
1406                 sirfport->uart_reg->uart_type = SIRF_REAL_UART;
1407                 if (of_property_read_u32(pdev->dev.of_node,
1408                                 "sirf,uart-dma-rx-channel",
1409                                 &sirfport->rx_dma_no))
1410                         sirfport->rx_dma_no = UNVALID_DMA_CHAN;
1411                 if (of_property_read_u32(pdev->dev.of_node,
1412                                 "sirf,uart-dma-tx-channel",
1413                                 &sirfport->tx_dma_no))
1414                         sirfport->tx_dma_no = UNVALID_DMA_CHAN;
1415         }
1416         if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart")) {
1417                 sirfport->uart_reg->uart_type = SIRF_USP_UART;
1418                 if (of_property_read_u32(pdev->dev.of_node,
1419                                 "sirf,usp-dma-rx-channel",
1420                                 &sirfport->rx_dma_no))
1421                         sirfport->rx_dma_no = UNVALID_DMA_CHAN;
1422                 if (of_property_read_u32(pdev->dev.of_node,
1423                                 "sirf,usp-dma-tx-channel",
1424                                 &sirfport->tx_dma_no))
1425                         sirfport->tx_dma_no = UNVALID_DMA_CHAN;
1426                 if (!sirfport->hw_flow_ctrl)
1427                         goto usp_no_flow_control;
1428                 if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL))
1429                         sirfport->cts_gpio = of_get_named_gpio(
1430                                         pdev->dev.of_node, "cts-gpios", 0);
1431                 else
1432                         sirfport->cts_gpio = -1;
1433                 if (of_find_property(pdev->dev.of_node, "rts-gpios", NULL))
1434                         sirfport->rts_gpio = of_get_named_gpio(
1435                                         pdev->dev.of_node, "rts-gpios", 0);
1436                 else
1437                         sirfport->rts_gpio = -1;
1438
1439                 if ((!gpio_is_valid(sirfport->cts_gpio) ||
1440                          !gpio_is_valid(sirfport->rts_gpio))) {
1441                         ret = -EINVAL;
1442                         dev_err(&pdev->dev,
1443                                 "USP flow control must have cts and rts gpio\n");
1444                         goto err;
1445                 }
1446                 ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio,
1447                                 "usp-cts-gpio");
1448                 if (ret) {
1449                         dev_err(&pdev->dev, "Unable to request cts gpio\n");
1450                         goto err;
1451                 }
1452                 gpio_direction_input(sirfport->cts_gpio);
1453                 ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio,
1454                                 "usp-rts-gpio");
1455                 if (ret) {
1456                         dev_err(&pdev->dev, "Unable to request rts gpio\n");
1457                         goto err;
1458                 }
1459                 gpio_direction_output(sirfport->rts_gpio, 1);
1460         }
1461 usp_no_flow_control:
1462         if (of_device_is_compatible(pdev->dev.of_node, "sirf,marco-uart"))
1463                 sirfport->is_marco = true;
1464
1465         if (of_property_read_u32(pdev->dev.of_node,
1466                         "fifosize",
1467                         &port->fifosize)) {
1468                 dev_err(&pdev->dev,
1469                         "Unable to find fifosize in uart node.\n");
1470                 ret = -EINVAL;
1471                 goto err;
1472         }
1473
1474         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1475         if (res == NULL) {
1476                 dev_err(&pdev->dev, "Unable to get memory resource.\n");
1477                 ret = -EFAULT;
1478                 goto err;
1479         }
1480         spin_lock_init(&sirfport->rx_lock);
1481         spin_lock_init(&sirfport->tx_lock);
1482         tasklet_init(&sirfport->rx_dma_complete_tasklet,
1483                         sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport);
1484         tasklet_init(&sirfport->rx_tmo_process_tasklet,
1485                         sirfsoc_rx_tmo_process_tl, (unsigned long)sirfport);
1486         port->mapbase = res->start;
1487         port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
1488         if (!port->membase) {
1489                 dev_err(&pdev->dev, "Cannot remap resource.\n");
1490                 ret = -ENOMEM;
1491                 goto err;
1492         }
1493         res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1494         if (res == NULL) {
1495                 dev_err(&pdev->dev, "Unable to get IRQ resource.\n");
1496                 ret = -EFAULT;
1497                 goto err;
1498         }
1499         port->irq = res->start;
1500
1501         sirfport->clk = clk_get(&pdev->dev, NULL);
1502         if (IS_ERR(sirfport->clk)) {
1503                 ret = PTR_ERR(sirfport->clk);
1504                 goto err;
1505         }
1506         port->uartclk = clk_get_rate(sirfport->clk);
1507
1508         port->ops = &sirfsoc_uart_ops;
1509         spin_lock_init(&port->lock);
1510
1511         platform_set_drvdata(pdev, sirfport);
1512         ret = uart_add_one_port(&sirfsoc_uart_drv, port);
1513         if (ret != 0) {
1514                 dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
1515                 goto port_err;
1516         }
1517
1518         return 0;
1519
1520 port_err:
1521         clk_put(sirfport->clk);
1522 err:
1523         return ret;
1524 }
1525
1526 static int sirfsoc_uart_remove(struct platform_device *pdev)
1527 {
1528         struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
1529         struct uart_port *port = &sirfport->port;
1530         uart_remove_one_port(&sirfsoc_uart_drv, port);
1531         clk_put(sirfport->clk);
1532         return 0;
1533 }
1534
1535 #ifdef CONFIG_PM_SLEEP
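/* system sleep hooks simply defer to the serial core port suspend/resume */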
1536 static int
1537 sirfsoc_uart_suspend(struct device *pdev)
1538 {
1539         struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
1540         struct uart_port *port = &sirfport->port;
1541         uart_suspend_port(&sirfsoc_uart_drv, port);
1542         return 0;
1543 }
1544
1545 static int sirfsoc_uart_resume(struct device *pdev)
1546 {
1547         struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
1548         struct uart_port *port = &sirfport->port;
1549         uart_resume_port(&sirfsoc_uart_drv, port);
1550         return 0;
1551 }
1552 #endif
1553
1554 static const struct dev_pm_ops sirfsoc_uart_pm_ops = {
1555         SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume)
1556 };
1557
1558 static struct platform_driver sirfsoc_uart_driver = {
1559         .probe          = sirfsoc_uart_probe,
1560         .remove         = sirfsoc_uart_remove,
1561         .driver         = {
1562                 .name   = SIRFUART_PORT_NAME,
1563                 .owner  = THIS_MODULE,
1564                 .of_match_table = sirfsoc_uart_ids,
1565                 .pm     = &sirfsoc_uart_pm_ops,
1566         },
1567 };
1568
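/*
 * Module init/exit: register the uart_driver first so ports can be added as
 * the platform driver probes each device, and unwind in the reverse order on
 * failure and at module unload.
 */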
1569 static int __init sirfsoc_uart_init(void)
1570 {
1571         int ret = 0;
1572
1573         ret = uart_register_driver(&sirfsoc_uart_drv);
1574         if (ret)
1575                 goto out;
1576
1577         ret = platform_driver_register(&sirfsoc_uart_driver);
1578         if (ret)
1579                 uart_unregister_driver(&sirfsoc_uart_drv);
1580 out:
1581         return ret;
1582 }
1583 module_init(sirfsoc_uart_init);
1584
1585 static void __exit sirfsoc_uart_exit(void)
1586 {
1587         platform_driver_unregister(&sirfsoc_uart_driver);
1588         uart_unregister_driver(&sirfsoc_uart_drv);
1589 }
1590 module_exit(sirfsoc_uart_exit);
1591
1592 MODULE_LICENSE("GPL v2");
1593 MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang <Rong.Wang@csr.com>");
1594 MODULE_DESCRIPTION("CSR SiRFprimaII UART driver");