/*
 * Driver for CSR SiRFprimaII onboard UARTs.
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/sysrq.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of_gpio.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>

#include "sirfsoc_uart.h"

static unsigned int
sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count);
static unsigned int
sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
static struct uart_driver sirfsoc_uart_drv;

static void sirfsoc_uart_tx_dma_complete_callback(void *param);
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port);
static void sirfsoc_uart_rx_dma_complete_callback(void *param);
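/*
 * Pre-computed divisor register values for standard baud rates when the
 * UART input clock (ioclk) runs at 150 MHz; sirfsoc_uart_set_termios()
 * falls back to run-time calculation for other clock rates or for rates
 * not listed here.
 */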
static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
        {4000000, 2359296},
        {3500000, 1310721},
        {3000000, 1572865},
        {2500000, 1245186},
        {2000000, 1572866},
        {1500000, 1245188},
        {1152000, 1638404},
        {1000000, 1572869},
        {921600, 1114120},
        {576000, 1245196},
        {500000, 1245198},
        {460800, 1572876},
        {230400, 1310750},
        {115200, 1310781},
        {57600, 1310843},
        {38400, 1114328},
        {19200, 1114545},
        {9600, 1114979},
};

static struct sirfsoc_uart_port sirfsoc_uart_ports[SIRFSOC_UART_NR] = {
        [0] = {
                .port = {
                        .iotype         = UPIO_MEM,
                        .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
        },
        [1] = {
                .port = {
                        .iotype         = UPIO_MEM,
                        .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 1,
                },
        },
        [2] = {
                .port = {
                        .iotype         = UPIO_MEM,
                        .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 2,
                },
        },
        [3] = {
                .port = {
                        .iotype         = UPIO_MEM,
                        .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 3,
                },
        },
        [4] = {
                .port = {
                        .iotype         = UPIO_MEM,
                        .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 4,
                },
        },
        [5] = {
                .port = {
                        .iotype         = UPIO_MEM,
                        .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 5,
                },
        },
};

static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
{
        return container_of(port, struct sirfsoc_uart_port, port);
}

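/* Report TIOCSER_TEMT as soon as the TX FIFO "empty" status bit is set. */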
static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
{
        unsigned long reg;
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
        reg = rd_regl(port, ureg->sirfsoc_tx_fifo_status);

        return (reg & ufifo_st->ff_empty(port->line)) ? TIOCSER_TEMT : 0;
}

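/*
 * With hardware flow control disabled (or modem status reporting off),
 * CTS is always reported as asserted; otherwise it is sampled from the
 * AFC status register on real UARTs or from the cts_gpio pin on USP
 * ports.
 */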
static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
                goto cts_asserted;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                if (!(rd_regl(port, ureg->sirfsoc_afc_ctrl) &
                                                SIRFUART_AFC_CTS_STATUS))
                        goto cts_asserted;
                else
                        goto cts_deasserted;
        } else {
                if (!gpio_get_value(sirfport->cts_gpio))
                        goto cts_asserted;
                else
                        goto cts_deasserted;
        }
cts_deasserted:
        return TIOCM_CAR | TIOCM_DSR;
cts_asserted:
        return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
}

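/*
 * Drive RTS: real UARTs program the RX threshold field of the auto flow
 * control register, USP ports toggle the (active-low) rts_gpio line.
 */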
static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        unsigned int assert = mctrl & TIOCM_RTS;
        unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0;
        unsigned int current_val;

        if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
                return;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                current_val = rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0xFF;
                val |= current_val;
                wr_regl(port, ureg->sirfsoc_afc_ctrl, val);
        } else {
                if (!val)
                        gpio_set_value(sirfport->rts_gpio, 1);
                else
                        gpio_set_value(sirfport->rts_gpio, 0);
        }
}

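/*
 * Stop transmission: pause the TX DMA channel if a transfer is running,
 * otherwise mask the TX FIFO empty interrupt (via the int_en register,
 * or the dedicated INT_EN_CLR register on marco chips).
 */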
static void sirfsoc_uart_stop_tx(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

        if (sirfport->tx_dma_chan) {
                if (sirfport->tx_dma_state == TX_DMA_RUNNING) {
                        dmaengine_pause(sirfport->tx_dma_chan);
                        sirfport->tx_dma_state = TX_DMA_PAUSE;
                } else {
                        if (!sirfport->is_marco)
                                wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                ~uint_en->sirfsoc_txfifo_empty_en);
                        else
                                wr_regl(port, SIRFUART_INT_EN_CLR,
                                uint_en->sirfsoc_txfifo_empty_en);
                }
        } else {
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                ~uint_en->sirfsoc_txfifo_empty_en);
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                uint_en->sirfsoc_txfifo_empty_en);
        }
}

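/*
 * Push pending xmit data out through DMA.  Transfers shorter than one
 * word, or with an unaligned start address, are drained by PIO first;
 * the 4-byte aligned remainder is mapped and handed to the dmaengine as
 * a single slave transfer.
 */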
static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
{
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        struct circ_buf *xmit = &port->state->xmit;
        unsigned long tran_size;
        unsigned long tran_start;
        unsigned long pio_tx_size;

        tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
        tran_start = (unsigned long)(xmit->buf + xmit->tail);
        if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
                        !tran_size)
                return;
        if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
                dmaengine_resume(sirfport->tx_dma_chan);
                return;
        }
        if (sirfport->tx_dma_state == TX_DMA_RUNNING)
                return;
        if (!sirfport->is_marco)
                wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg)&
                                ~(uint_en->sirfsoc_txfifo_empty_en));
        else
                wr_regl(port, SIRFUART_INT_EN_CLR,
                                uint_en->sirfsoc_txfifo_empty_en);
        /*
         * DMA requires both the buffer address and the buffer length to be
         * 4-byte aligned, so PIO is used for the unaligned parts:
         * 1. if the address is not 4-byte aligned, send the first 1~3 bytes
         *    by PIO, then switch to DMA for the aligned remainder
         * 2. if the length is not 4-byte aligned, send the aligned part by
         *    DMA first and the trailing 1~3 bytes by PIO
         */
        if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
                wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
                wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
                        rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)|
                        SIRFUART_IO_MODE);
                if (BYTES_TO_ALIGN(tran_start)) {
                        pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
                                BYTES_TO_ALIGN(tran_start));
                        tran_size -= pio_tx_size;
                }
                if (tran_size < 4)
                        sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg)|
                                uint_en->sirfsoc_txfifo_empty_en);
                else
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                uint_en->sirfsoc_txfifo_empty_en);
                wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
        } else {
                /* tx transfer mode switch into dma mode */
                wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
                wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
                        rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)&
                        ~SIRFUART_IO_MODE);
                wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
                tran_size &= ~(0x3);

                sirfport->tx_dma_addr = dma_map_single(port->dev,
                        xmit->buf + xmit->tail,
                        tran_size, DMA_TO_DEVICE);
                sirfport->tx_dma_desc = dmaengine_prep_slave_single(
                        sirfport->tx_dma_chan, sirfport->tx_dma_addr,
                        tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
                if (!sirfport->tx_dma_desc) {
                        dev_err(port->dev, "DMA prep slave single fail\n");
                        return;
                }
                sirfport->tx_dma_desc->callback =
                        sirfsoc_uart_tx_dma_complete_callback;
                sirfport->tx_dma_desc->callback_param = (void *)sirfport;
                sirfport->transfer_size = tran_size;

                dmaengine_submit(sirfport->tx_dma_desc);
                dma_async_issue_pending(sirfport->tx_dma_chan);
                sirfport->tx_dma_state = TX_DMA_RUNNING;
        }
}

static void sirfsoc_uart_start_tx(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        if (sirfport->tx_dma_chan)
                sirfsoc_uart_tx_with_dma(sirfport);
        else {
                sirfsoc_uart_pio_tx_chars(sirfport, 1);
                wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        rd_regl(port, ureg->sirfsoc_int_en_reg)|
                                        uint_en->sirfsoc_txfifo_empty_en);
                else
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        uint_en->sirfsoc_txfifo_empty_en);
        }
}

static void sirfsoc_uart_stop_rx(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

        wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
        if (sirfport->rx_dma_chan) {
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                ~(SIRFUART_RX_DMA_INT_EN(port, uint_en) |
                                uint_en->sirfsoc_rx_done_en));
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                        SIRFUART_RX_DMA_INT_EN(port, uint_en)|
                                        uint_en->sirfsoc_rx_done_en);
                dmaengine_terminate_all(sirfport->rx_dma_chan);
        } else {
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg)&
                                ~(SIRFUART_RX_IO_INT_EN(port, uint_en)));
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                        SIRFUART_RX_IO_INT_EN(port, uint_en));
        }
}

static void sirfsoc_uart_disable_ms(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

        if (!sirfport->hw_flow_ctrl)
                return;
        sirfport->ms_enabled = false;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                wr_regl(port, ureg->sirfsoc_afc_ctrl,
                                rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0x3FF);
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        rd_regl(port, ureg->sirfsoc_int_en_reg)&
                                        ~uint_en->sirfsoc_cts_en);
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                        uint_en->sirfsoc_cts_en);
        } else
                disable_irq(gpio_to_irq(sirfport->cts_gpio));
}

static irqreturn_t sirfsoc_uart_usp_cts_handler(int irq, void *dev_id)
{
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
        struct uart_port *port = &sirfport->port;
        spin_lock(&port->lock);
        if (gpio_is_valid(sirfport->cts_gpio) && sirfport->ms_enabled)
                uart_handle_cts_change(port,
                                !gpio_get_value(sirfport->cts_gpio));
        spin_unlock(&port->lock);
        return IRQ_HANDLED;
}

static void sirfsoc_uart_enable_ms(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

        if (!sirfport->hw_flow_ctrl)
                return;
        sirfport->ms_enabled = true;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                wr_regl(port, ureg->sirfsoc_afc_ctrl,
                                rd_regl(port, ureg->sirfsoc_afc_ctrl) |
                                SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN);
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        rd_regl(port, ureg->sirfsoc_int_en_reg)
                                        | uint_en->sirfsoc_cts_en);
                else
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        uint_en->sirfsoc_cts_en);
        } else
                enable_irq(gpio_to_irq(sirfport->cts_gpio));
}

static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                unsigned long ulcon = rd_regl(port, ureg->sirfsoc_line_ctrl);
                if (break_state)
                        ulcon |= SIRFUART_SET_BREAK;
                else
                        ulcon &= ~SIRFUART_SET_BREAK;
                wr_regl(port, ureg->sirfsoc_line_ctrl, ulcon);
        }
}

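/*
 * Drain up to max_rx_count characters from the RX FIFO in PIO mode and
 * feed them to the tty layer; returns the number of characters read.
 */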
static unsigned int
sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
        unsigned int ch, rx_count = 0;
        struct tty_struct *tty;
        tty = tty_port_tty_get(&port->state->port);
        if (!tty)
                return -ENODEV;
        while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
                                        ufifo_st->ff_empty(port->line))) {
                ch = rd_regl(port, ureg->sirfsoc_rx_fifo_data) |
                        SIRFUART_DUMMY_READ;
                if (unlikely(uart_handle_sysrq_char(port, ch)))
                        continue;
                uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
                rx_count++;
                if (rx_count >= max_rx_count)
                        break;
        }

        sirfport->rx_io_count += rx_count;
        port->icount.rx += rx_count;

        return rx_count;
}

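/*
 * Move up to 'count' characters from the circular xmit buffer into the
 * TX FIFO, stopping early once the FIFO is full; writers are woken when
 * the pending data drops below WAKEUP_CHARS.
 */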
static unsigned int
sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
{
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
        struct circ_buf *xmit = &port->state->xmit;
        unsigned int num_tx = 0;
        while (!uart_circ_empty(xmit) &&
                !(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
                                        ufifo_st->ff_full(port->line)) &&
                count--) {
                wr_regl(port, ureg->sirfsoc_tx_fifo_data,
                                xmit->buf[xmit->tail]);
                xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
                port->icount.tx++;
                num_tx++;
        }
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
        return num_tx;
}

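/*
 * TX DMA completion callback: advance the xmit tail by the transferred
 * size, unmap the DMA buffer and immediately try to queue the next chunk.
 */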
static void sirfsoc_uart_tx_dma_complete_callback(void *param)
{
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
        struct uart_port *port = &sirfport->port;
        struct circ_buf *xmit = &port->state->xmit;
        unsigned long flags;

        spin_lock_irqsave(&port->lock, flags);
        xmit->tail = (xmit->tail + sirfport->transfer_size) &
                                (UART_XMIT_SIZE - 1);
        port->icount.tx += sirfport->transfer_size;
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
        if (sirfport->tx_dma_addr)
                dma_unmap_single(port->dev, sirfport->tx_dma_addr,
                                sirfport->transfer_size, DMA_TO_DEVICE);
        sirfport->tx_dma_state = TX_DMA_IDLE;
        sirfsoc_uart_tx_with_dma(sirfport);
        spin_unlock_irqrestore(&port->lock, flags);
}

static void sirfsoc_uart_insert_rx_buf_to_tty(
                struct sirfsoc_uart_port *sirfport, int count)
{
        struct uart_port *port = &sirfport->port;
        struct tty_port *tport = &port->state->port;
        int inserted;

        inserted = tty_insert_flip_string(tport,
                sirfport->rx_dma_items[sirfport->rx_completed].xmit.buf, count);
        port->icount.rx += inserted;
}

static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);

        sirfport->rx_dma_items[index].xmit.tail =
                sirfport->rx_dma_items[index].xmit.head = 0;
        sirfport->rx_dma_items[index].desc =
                dmaengine_prep_slave_single(sirfport->rx_dma_chan,
                sirfport->rx_dma_items[index].dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
                DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
        if (!sirfport->rx_dma_items[index].desc) {
                dev_err(port->dev, "DMA slave single fail\n");
                return;
        }
        sirfport->rx_dma_items[index].desc->callback =
                sirfsoc_uart_rx_dma_complete_callback;
        sirfport->rx_dma_items[index].desc->callback_param = sirfport;
        sirfport->rx_dma_items[index].cookie =
                dmaengine_submit(sirfport->rx_dma_items[index].desc);
        dma_async_issue_pending(sirfport->rx_dma_chan);
}

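/*
 * RX timeout tasklet: push every completed DMA buffer plus the partially
 * filled one to the tty layer, then drain the last few bytes in PIO mode;
 * once those are collected, RX DMA is restarted, otherwise the rx_done
 * interrupt is left enabled to finish the job.
 */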
static void sirfsoc_rx_tmo_process_tl(unsigned long param)
{
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
        unsigned int count;
        unsigned long flags;
        struct dma_tx_state tx_state;

        spin_lock_irqsave(&port->lock, flags);
        while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
                sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
                sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
                                        SIRFSOC_RX_DMA_BUF_SIZE);
                sirfport->rx_completed++;
                sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
        }
        count = CIRC_CNT(sirfport->rx_dma_items[sirfport->rx_issued].xmit.head,
                sirfport->rx_dma_items[sirfport->rx_issued].xmit.tail,
                SIRFSOC_RX_DMA_BUF_SIZE);
        if (count > 0)
                sirfsoc_uart_insert_rx_buf_to_tty(sirfport, count);
        wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
                        rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
                        SIRFUART_IO_MODE);
        sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
        if (sirfport->rx_io_count == 4) {
                sirfport->rx_io_count = 0;
                wr_regl(port, ureg->sirfsoc_int_st_reg,
                                uint_st->sirfsoc_rx_done);
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                ~(uint_en->sirfsoc_rx_done_en));
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                        uint_en->sirfsoc_rx_done_en);
                sirfsoc_uart_start_next_rx_dma(port);
        } else {
                wr_regl(port, ureg->sirfsoc_int_st_reg,
                                uint_st->sirfsoc_rx_done);
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) |
                                (uint_en->sirfsoc_rx_done_en));
                else
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        uint_en->sirfsoc_rx_done_en);
        }
        spin_unlock_irqrestore(&port->lock, flags);
        tty_flip_buffer_push(&port->state->port);
}

static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
{
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        struct dma_tx_state tx_state;
        dmaengine_tx_status(sirfport->rx_dma_chan,
                sirfport->rx_dma_items[sirfport->rx_issued].cookie, &tx_state);
        dmaengine_terminate_all(sirfport->rx_dma_chan);
        sirfport->rx_dma_items[sirfport->rx_issued].xmit.head =
                SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
        if (!sirfport->is_marco)
                wr_regl(port, ureg->sirfsoc_int_en_reg,
                        rd_regl(port, ureg->sirfsoc_int_en_reg) &
                        ~(uint_en->sirfsoc_rx_timeout_en));
        else
                wr_regl(port, SIRFUART_INT_EN_CLR,
                                uint_en->sirfsoc_rx_timeout_en);
        tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
}

static void sirfsoc_uart_handle_rx_done(struct sirfsoc_uart_port *sirfport)
{
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;

        sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
        if (sirfport->rx_io_count == 4) {
                sirfport->rx_io_count = 0;
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                ~(uint_en->sirfsoc_rx_done_en));
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                        uint_en->sirfsoc_rx_done_en);
                wr_regl(port, ureg->sirfsoc_int_st_reg,
                                uint_st->sirfsoc_rx_timeout);
                sirfsoc_uart_start_next_rx_dma(port);
        }
}

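/*
 * Main interrupt handler: accounts break/overrun/frame/parity errors,
 * reports CTS changes, dispatches RX work to the DMA handlers or to PIO
 * draining, and refills the TX FIFO (or restarts TX DMA) when the FIFO
 * empty interrupt fires.
 */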
static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
{
        unsigned long intr_status;
        unsigned long cts_status;
        unsigned long flag = TTY_NORMAL;
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
        struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        struct uart_state *state = port->state;
        struct circ_buf *xmit = &port->state->xmit;
        spin_lock(&port->lock);
        intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
        wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);
        intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
        if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(port, uint_st)))) {
                if (intr_status & uint_st->sirfsoc_rxd_brk) {
                        port->icount.brk++;
                        if (uart_handle_break(port))
                                goto recv_char;
                }
                if (intr_status & uint_st->sirfsoc_rx_oflow)
                        port->icount.overrun++;
                if (intr_status & uint_st->sirfsoc_frm_err) {
                        port->icount.frame++;
                        flag = TTY_FRAME;
                }
                if (intr_status & uint_st->sirfsoc_parity_err)
                        flag = TTY_PARITY;
                wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
                wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
                wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
                intr_status &= port->read_status_mask;
                uart_insert_char(port, intr_status,
                                        uint_en->sirfsoc_rx_oflow_en, 0, flag);
        }
recv_char:
        if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
                        (intr_status & SIRFUART_CTS_INT_ST(uint_st)) &&
                        !sirfport->tx_dma_state) {
                cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) &
                                        SIRFUART_AFC_CTS_STATUS;
                if (cts_status != 0)
                        cts_status = 0;
                else
                        cts_status = 1;
                uart_handle_cts_change(port, cts_status);
                wake_up_interruptible(&state->port.delta_msr_wait);
        }
        if (sirfport->rx_dma_chan) {
                if (intr_status & uint_st->sirfsoc_rx_timeout)
                        sirfsoc_uart_handle_rx_tmo(sirfport);
                if (intr_status & uint_st->sirfsoc_rx_done)
                        sirfsoc_uart_handle_rx_done(sirfport);
        } else {
                if (intr_status & SIRFUART_RX_IO_INT_ST(uint_st))
                        sirfsoc_uart_pio_rx_chars(port,
                                        SIRFSOC_UART_IO_RX_MAX_CNT);
        }
        spin_unlock(&port->lock);
        tty_flip_buffer_push(&state->port);
        spin_lock(&port->lock);
        if (intr_status & uint_st->sirfsoc_txfifo_empty) {
                if (sirfport->tx_dma_chan)
                        sirfsoc_uart_tx_with_dma(sirfport);
                else {
                        if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
                                spin_unlock(&port->lock);
                                return IRQ_HANDLED;
                        } else {
                                sirfsoc_uart_pio_tx_chars(sirfport,
                                        SIRFSOC_UART_IO_TX_REASONABLE_CNT);
                                if ((uart_circ_empty(xmit)) &&
                                (rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
                                ufifo_st->ff_empty(port->line)))
                                        sirfsoc_uart_stop_tx(port);
                        }
                }
        }
        spin_unlock(&port->lock);

        return IRQ_HANDLED;
}

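/*
 * RX DMA completion tasklet: push every buffer whose cookie has completed
 * to the tty layer and, while the RX timeout interrupt is still enabled,
 * resubmit the buffer right away to keep the DMA ring running.
 */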
static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
{
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        unsigned long flags;
        struct dma_tx_state tx_state;
        spin_lock_irqsave(&port->lock, flags);
        while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
                        sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
                sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
                                        SIRFSOC_RX_DMA_BUF_SIZE);
                if (rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                uint_en->sirfsoc_rx_timeout_en)
                        sirfsoc_rx_submit_one_dma_desc(port,
                                        sirfport->rx_completed++);
                else
                        sirfport->rx_completed++;
                sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
        }
        spin_unlock_irqrestore(&port->lock, flags);
        tty_flip_buffer_push(&port->state->port);
}

static void sirfsoc_uart_rx_dma_complete_callback(void *param)
{
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
        unsigned long flags;

        spin_lock_irqsave(&sirfport->port.lock, flags);
        sirfport->rx_issued++;
        sirfport->rx_issued %= SIRFSOC_RX_LOOP_BUF_CNT;
        tasklet_schedule(&sirfport->rx_dma_complete_tasklet);
        spin_unlock_irqrestore(&sirfport->port.lock, flags);
}

/* submit the rx dma descriptors to the dmaengine and enable rx dma interrupts */
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        int i;
        sirfport->rx_io_count = 0;
        wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
                rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
                ~SIRFUART_IO_MODE);
        for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
                sirfsoc_rx_submit_one_dma_desc(port, i);
        sirfport->rx_completed = sirfport->rx_issued = 0;
        if (!sirfport->is_marco)
                wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) |
                                SIRFUART_RX_DMA_INT_EN(port, uint_en));
        else
                wr_regl(port, ureg->sirfsoc_int_en_reg,
                        SIRFUART_RX_DMA_INT_EN(port, uint_en));
}

static void sirfsoc_uart_start_rx(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

        sirfport->rx_io_count = 0;
        wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
        wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
        wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
        if (sirfport->rx_dma_chan)
                sirfsoc_uart_start_next_rx_dma(port);
        else {
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) |
                                SIRFUART_RX_IO_INT_EN(port, uint_en));
                else
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                SIRFUART_RX_IO_INT_EN(port, uint_en));
        }
}

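/*
 * For USP ports: try every legal sample divider and pick the ioclk
 * divider that gives the smallest deviation from the requested rate;
 * the chosen sample divider is returned through *sample_reg.
 */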
static unsigned int
sirfsoc_usp_calc_sample_div(unsigned long set_rate,
                unsigned long ioclk_rate, unsigned long *sample_reg)
{
        unsigned long min_delta = ~0UL;
        unsigned short sample_div;
        unsigned long ioclk_div = 0;
        unsigned long temp_delta;

        for (sample_div = SIRF_MIN_SAMPLE_DIV;
                        sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
                temp_delta = ioclk_rate -
                (ioclk_rate + (set_rate * sample_div) / 2)
                / (set_rate * sample_div) * set_rate * sample_div;

                temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
                if (temp_delta < min_delta) {
                        ioclk_div = (2 * ioclk_rate /
                                (set_rate * sample_div) + 1) / 2 - 1;
                        if (ioclk_div > SIRF_IOCLK_DIV_MAX)
                                continue;
                        min_delta = temp_delta;
                        *sample_reg = sample_div;
                        if (!temp_delta)
                                break;
                }
        }
        return ioclk_div;
}

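/*
 * For real UARTs: build the divisor register value (ioclk divider and
 * sample divider fields) that best approximates the requested baud rate;
 * the rate actually achieved is returned through *set_baud.
 */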
static unsigned int
sirfsoc_uart_calc_sample_div(unsigned long baud_rate,
                        unsigned long ioclk_rate, unsigned long *set_baud)
{
        unsigned long min_delta = ~0UL;
        unsigned short sample_div;
        unsigned int regv = 0;
        unsigned long ioclk_div;
        unsigned long baud_tmp;
        int temp_delta;

        for (sample_div = SIRF_MIN_SAMPLE_DIV;
                        sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
                ioclk_div = (ioclk_rate / (baud_rate * (sample_div + 1))) - 1;
                if (ioclk_div > SIRF_IOCLK_DIV_MAX)
                        continue;
                baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1));
                temp_delta = baud_tmp - baud_rate;
                temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
                if (temp_delta < min_delta) {
                        regv = regv & (~SIRF_IOCLK_DIV_MASK);
                        regv = regv | ioclk_div;
                        regv = regv & (~SIRF_SAMPLE_DIV_MASK);
                        regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT);
                        min_delta = temp_delta;
                        *set_baud = baud_tmp;
                }
        }
        return regv;
}

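/*
 * Apply termios settings: frame format, parity, flow control and baud
 * rate.  Known rates on a 150 MHz ioclk use the precomputed divisor
 * table, everything else is calculated at run time; USP ports get their
 * frame length and clock dividers programmed into the mode/frame control
 * registers instead of the line control register.
 */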
static void sirfsoc_uart_set_termios(struct uart_port *port,
                                       struct ktermios *termios,
                                       struct ktermios *old)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        unsigned long   config_reg = 0;
        unsigned long   baud_rate;
        unsigned long   set_baud;
        unsigned long   flags;
        unsigned long   ic;
        unsigned int    clk_div_reg = 0;
        unsigned long   txfifo_op_reg, ioclk_rate;
        unsigned long   rx_time_out;
        int             threshold_div;
        u32             data_bit_len, stop_bit_len, len_val;
        unsigned long   sample_div_reg = 0xf;
        ioclk_rate      = port->uartclk;

        switch (termios->c_cflag & CSIZE) {
        default:
        case CS8:
                data_bit_len = 8;
                config_reg |= SIRFUART_DATA_BIT_LEN_8;
                break;
        case CS7:
                data_bit_len = 7;
                config_reg |= SIRFUART_DATA_BIT_LEN_7;
                break;
        case CS6:
                data_bit_len = 6;
                config_reg |= SIRFUART_DATA_BIT_LEN_6;
                break;
        case CS5:
                data_bit_len = 5;
                config_reg |= SIRFUART_DATA_BIT_LEN_5;
                break;
        }
        if (termios->c_cflag & CSTOPB) {
                config_reg |= SIRFUART_STOP_BIT_LEN_2;
                stop_bit_len = 2;
        } else
                stop_bit_len = 1;

        spin_lock_irqsave(&port->lock, flags);
        port->read_status_mask = uint_en->sirfsoc_rx_oflow_en;
        port->ignore_status_mask = 0;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                if (termios->c_iflag & INPCK)
                        port->read_status_mask |= uint_en->sirfsoc_frm_err_en |
                                uint_en->sirfsoc_parity_err_en;
        } else {
                if (termios->c_iflag & INPCK)
                        port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
        }
        if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                        port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                if (termios->c_iflag & IGNPAR)
                        port->ignore_status_mask |=
                                uint_en->sirfsoc_frm_err_en |
                                uint_en->sirfsoc_parity_err_en;
                if (termios->c_cflag & PARENB) {
                        if (termios->c_cflag & CMSPAR) {
                                if (termios->c_cflag & PARODD)
                                        config_reg |= SIRFUART_STICK_BIT_MARK;
                                else
                                        config_reg |= SIRFUART_STICK_BIT_SPACE;
                        } else if (termios->c_cflag & PARODD) {
                                config_reg |= SIRFUART_STICK_BIT_ODD;
                        } else {
                                config_reg |= SIRFUART_STICK_BIT_EVEN;
                        }
                }
        } else {
                if (termios->c_iflag & IGNPAR)
                        port->ignore_status_mask |=
                                uint_en->sirfsoc_frm_err_en;
                if (termios->c_cflag & PARENB)
                        dev_warn(port->dev,
                                        "USP-UART not support parity err\n");
        }
        if (termios->c_iflag & IGNBRK) {
                port->ignore_status_mask |=
                        uint_en->sirfsoc_rxd_brk_en;
                if (termios->c_iflag & IGNPAR)
                        port->ignore_status_mask |=
                                uint_en->sirfsoc_rx_oflow_en;
        }
        if ((termios->c_cflag & CREAD) == 0)
                port->ignore_status_mask |= SIRFUART_DUMMY_READ;
        /* Hardware Flow Control Settings */
        if (UART_ENABLE_MS(port, termios->c_cflag)) {
                if (!sirfport->ms_enabled)
                        sirfsoc_uart_enable_ms(port);
        } else {
                if (sirfport->ms_enabled)
                        sirfsoc_uart_disable_ms(port);
        }
        baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000);
        if (ioclk_rate == 150000000) {
                for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
                        if (baud_rate == baudrate_to_regv[ic].baud_rate)
                                clk_div_reg = baudrate_to_regv[ic].reg_val;
        }
        set_baud = baud_rate;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                if (unlikely(clk_div_reg == 0))
                        clk_div_reg = sirfsoc_uart_calc_sample_div(baud_rate,
                                        ioclk_rate, &set_baud);
                wr_regl(port, ureg->sirfsoc_divisor, clk_div_reg);
        } else {
                clk_div_reg = sirfsoc_usp_calc_sample_div(baud_rate,
                                ioclk_rate, &sample_div_reg);
                sample_div_reg--;
                set_baud = ((ioclk_rate / (clk_div_reg+1) - 1) /
                                (sample_div_reg + 1));
                /* setting usp mode 2 */
                len_val = ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET) |
                                (1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET));
                len_val |= ((clk_div_reg & SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK)
                                << SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET);
                wr_regl(port, ureg->sirfsoc_mode2, len_val);
        }
        if (tty_termios_baud_rate(termios))
                tty_termios_encode_baud_rate(termios, set_baud, set_baud);
        /* set receive timeout and data bit length */
        rx_time_out = SIRFSOC_UART_RX_TIMEOUT(set_baud, 20000);
        rx_time_out = SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out);
        txfifo_op_reg = rd_regl(port, ureg->sirfsoc_tx_fifo_op);
        wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_STOP);
        wr_regl(port, ureg->sirfsoc_tx_fifo_op,
                        (txfifo_op_reg & ~SIRFUART_FIFO_START));
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                config_reg |= SIRFUART_RECV_TIMEOUT(port, rx_time_out);
                wr_regl(port, ureg->sirfsoc_line_ctrl, config_reg);
        } else {
                /* tx frame ctrl */
                len_val = (data_bit_len - 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET;
                len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
                                SIRFSOC_USP_TX_FRAME_LEN_OFFSET;
                len_val |= ((data_bit_len - 1) <<
                                SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET);
                len_val |= (((clk_div_reg & 0xc00) >> 10) <<
                                SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET);
                wr_regl(port, ureg->sirfsoc_tx_frame_ctrl, len_val);
                /* rx frame ctrl */
                len_val = (data_bit_len - 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET;
                len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
                                SIRFSOC_USP_RX_FRAME_LEN_OFFSET;
                len_val |= (data_bit_len - 1) <<
                                SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET;
                len_val |= (((clk_div_reg & 0xf000) >> 12) <<
                                SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET);
                wr_regl(port, ureg->sirfsoc_rx_frame_ctrl, len_val);
                /* async param */
                wr_regl(port, ureg->sirfsoc_async_param_reg,
                        (SIRFUART_RECV_TIMEOUT(port, rx_time_out)) |
                        (sample_div_reg & SIRFSOC_USP_ASYNC_DIV2_MASK) <<
                        SIRFSOC_USP_ASYNC_DIV2_OFFSET);
        }
        if (sirfport->tx_dma_chan)
                wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_DMA_MODE);
        else
                wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
        if (sirfport->rx_dma_chan)
                wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
        else
                wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
        /* Reset Rx/Tx FIFO Threshold level for proper baudrate */
        if (set_baud < 1000000)
                threshold_div = 1;
        else
                threshold_div = 2;
        wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl,
                                SIRFUART_FIFO_THD(port) / threshold_div);
        wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl,
                                SIRFUART_FIFO_THD(port) / threshold_div);
        txfifo_op_reg |= SIRFUART_FIFO_START;
        wr_regl(port, ureg->sirfsoc_tx_fifo_op, txfifo_op_reg);
        uart_update_timeout(port, termios->c_cflag, set_baud);
        sirfsoc_uart_start_rx(port);
        wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_TX_EN | SIRFUART_RX_EN);
        spin_unlock_irqrestore(&port->lock, flags);
}

static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state,
                              unsigned int oldstate)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        if (!state)
                clk_prepare_enable(sirfport->clk);
        else
                clk_disable_unprepare(sirfport->clk);
}

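/*
 * Bring the port up: request the interrupt, reset and configure the
 * FIFOs, program the DMA FIFO level checks when DMA channels are in use,
 * and hook the CTS GPIO interrupt for USP ports with hardware flow
 * control.
 */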
1040 static int sirfsoc_uart_startup(struct uart_port *port)
1041 {
1042         struct sirfsoc_uart_port *sirfport      = to_sirfport(port);
1043         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
1044         unsigned int index                      = port->line;
1045         int ret;
1046         set_irq_flags(port->irq, IRQF_VALID | IRQF_NOAUTOEN);
1047         ret = request_irq(port->irq,
1048                                 sirfsoc_uart_isr,
1049                                 0,
1050                                 SIRFUART_PORT_NAME,
1051                                 sirfport);
1052         if (ret != 0) {
1053                 dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n",
1054                                                         index, port->irq);
1055                 goto irq_err;
1056         }
1057
1058         /* initial hardware settings */
1059         wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
1060                 rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) |
1061                 SIRFUART_IO_MODE);
1062         wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
1063                 rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
1064                 SIRFUART_IO_MODE);
1065         wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0);
1066         wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0);
1067         wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN);
1068         if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
1069                 wr_regl(port, ureg->sirfsoc_mode1,
1070                         SIRFSOC_USP_ENDIAN_CTRL_LSBF |
1071                         SIRFSOC_USP_EN);
1072         wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_RESET);
1073         wr_regl(port, ureg->sirfsoc_tx_fifo_op, 0);
1074         wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
1075         wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
1076         wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
1077         wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
1078         if (sirfport->rx_dma_chan)
1079                 wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
1080                         SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
1081                         SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
1082                         SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
1083         if (sirfport->tx_dma_chan) {
1084                 sirfport->tx_dma_state = TX_DMA_IDLE;
1085                 wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
1086                                 SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) |
1087                                 SIRFUART_TX_FIFO_CHK_LC(port->line, 0xe) |
1088                                 SIRFUART_TX_FIFO_CHK_HC(port->line, 0x4));
1089         }
1090         sirfport->ms_enabled = false;
1091         if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
1092                 sirfport->hw_flow_ctrl) {
1093                 set_irq_flags(gpio_to_irq(sirfport->cts_gpio),
1094                         IRQF_VALID | IRQF_NOAUTOEN);
1095                 ret = request_irq(gpio_to_irq(sirfport->cts_gpio),
1096                         sirfsoc_uart_usp_cts_handler, IRQF_TRIGGER_FALLING |
1097                         IRQF_TRIGGER_RISING, "usp_cts_irq", sirfport);
1098                 if (ret != 0) {
1099                         dev_err(port->dev, "UART-USP:request gpio irq fail\n");
1100                         goto init_rx_err;
1101                 }
1102         }
1103
1104         enable_irq(port->irq);
1105
1106         return 0;
1107 init_rx_err:
1108         free_irq(port->irq, sirfport);
1109 irq_err:
1110         return ret;
1111 }
1112
1113 static void sirfsoc_uart_shutdown(struct uart_port *port)
1114 {
1115         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1116         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
1117         if (!sirfport->is_marco)
1118                 wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
1119         else
1120                 wr_regl(port, SIRFUART_INT_EN_CLR, ~0UL);
1121
1122         free_irq(port->irq, sirfport);
1123         if (sirfport->ms_enabled)
1124                 sirfsoc_uart_disable_ms(port);
1125         if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
1126                         sirfport->hw_flow_ctrl) {
1127                 gpio_set_value(sirfport->rts_gpio, 1);
1128                 free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
1129         }
1130         if (sirfport->tx_dma_chan)
1131                 sirfport->tx_dma_state = TX_DMA_IDLE;
1132 }
1133
1134 static const char *sirfsoc_uart_type(struct uart_port *port)
1135 {
1136         return port->type == SIRFSOC_PORT_TYPE ? SIRFUART_PORT_NAME : NULL;
1137 }
1138
1139 static int sirfsoc_uart_request_port(struct uart_port *port)
1140 {
1141         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1142         struct sirfsoc_uart_param *uart_param = &sirfport->uart_reg->uart_param;
1143         void *ret;
1144         ret = request_mem_region(port->mapbase,
1145                 SIRFUART_MAP_SIZE, uart_param->port_name);
1146         return ret ? 0 : -EBUSY;
1147 }
1148
1149 static void sirfsoc_uart_release_port(struct uart_port *port)
1150 {
1151         release_mem_region(port->mapbase, SIRFUART_MAP_SIZE);
1152 }
1153
1154 static void sirfsoc_uart_config_port(struct uart_port *port, int flags)
1155 {
1156         if (flags & UART_CONFIG_TYPE) {
1157                 port->type = SIRFSOC_PORT_TYPE;
1158                 sirfsoc_uart_request_port(port);
1159         }
1160 }
1161
1162 static struct uart_ops sirfsoc_uart_ops = {
1163         .tx_empty       = sirfsoc_uart_tx_empty,
1164         .get_mctrl      = sirfsoc_uart_get_mctrl,
1165         .set_mctrl      = sirfsoc_uart_set_mctrl,
1166         .stop_tx        = sirfsoc_uart_stop_tx,
1167         .start_tx       = sirfsoc_uart_start_tx,
1168         .stop_rx        = sirfsoc_uart_stop_rx,
1169         .enable_ms      = sirfsoc_uart_enable_ms,
1170         .break_ctl      = sirfsoc_uart_break_ctl,
1171         .startup        = sirfsoc_uart_startup,
1172         .shutdown       = sirfsoc_uart_shutdown,
1173         .set_termios    = sirfsoc_uart_set_termios,
1174         .pm             = sirfsoc_uart_pm,
1175         .type           = sirfsoc_uart_type,
1176         .release_port   = sirfsoc_uart_release_port,
1177         .request_port   = sirfsoc_uart_request_port,
1178         .config_port    = sirfsoc_uart_config_port,
1179 };
1180
1181 #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
1182 static int __init
1183 sirfsoc_uart_console_setup(struct console *co, char *options)
1184 {
        int baud = 115200;
        int bits = 8;
        int parity = 'n';
        int flow = 'n';
        struct uart_port *port;
        struct sirfsoc_uart_port *sirfport;
        struct sirfsoc_register *ureg;

        /* validate the console index before it is used to index the ports */
        if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
                return -EINVAL;

        port = &sirfsoc_uart_ports[co->index].port;
        sirfport = to_sirfport(port);
        ureg = &sirfport->uart_reg->uart_reg;

        if (!port->mapbase)
                return -ENODEV;

1198         /* enable USP in mode1 register */
1199         if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
1200                 wr_regl(port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN |
1201                                 SIRFSOC_USP_ENDIAN_CTRL_LSBF);
1202         if (options)
1203                 uart_parse_options(options, &baud, &parity, &bits, &flow);
1204         port->cons = co;
1205
1206         /* default console tx/rx transfer using io mode */
1207         sirfport->rx_dma_chan = NULL;
1208         sirfport->tx_dma_chan = NULL;
1209         return uart_set_options(port, co, baud, parity, bits, flow);
1210 }
1211
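/* Spin until the TX FIFO is no longer full, then write one character. */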
1212 static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
1213 {
1214         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1215         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
1216         struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
1217         while (rd_regl(port,
1218                 ureg->sirfsoc_tx_fifo_status) & ufifo_st->ff_full(port->line))
1219                 cpu_relax();
1220         wr_regl(port, ureg->sirfsoc_tx_fifo_data, ch);
1221 }
1222
1223 static void sirfsoc_uart_console_write(struct console *co, const char *s,
1224                                                         unsigned int count)
1225 {
1226         struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
1227         uart_console_write(port, s, count, sirfsoc_uart_console_putchar);
1228 }
1229
1230 static struct console sirfsoc_uart_console = {
1231         .name           = SIRFSOC_UART_NAME,
1232         .device         = uart_console_device,
1233         .flags          = CON_PRINTBUFFER,
1234         .index          = -1,
1235         .write          = sirfsoc_uart_console_write,
1236         .setup          = sirfsoc_uart_console_setup,
1237         .data           = &sirfsoc_uart_drv,
1238 };
1239
1240 static int __init sirfsoc_uart_console_init(void)
1241 {
1242         register_console(&sirfsoc_uart_console);
1243         return 0;
1244 }
1245 console_initcall(sirfsoc_uart_console_init);
1246 #endif
1247
1248 static struct uart_driver sirfsoc_uart_drv = {
1249         .owner          = THIS_MODULE,
1250         .driver_name    = SIRFUART_PORT_NAME,
1251         .nr             = SIRFSOC_UART_NR,
1252         .dev_name       = SIRFSOC_UART_NAME,
1253         .major          = SIRFSOC_UART_MAJOR,
1254         .minor          = SIRFSOC_UART_MINOR,
1255 #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
1256         .cons                   = &sirfsoc_uart_console,
1257 #else
1258         .cons                   = NULL,
1259 #endif
1260 };
1261
1262 static const struct of_device_id sirfsoc_uart_ids[] = {
1263         { .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart, },
1264         { .compatible = "sirf,marco-uart", .data = &sirfsoc_uart, },
1265         { .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp, },
1266         {}
1267 };
1268 MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
1269
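/*
 * Probe: derive the port index from the "cell-index" property (USP ports
 * are numbered after the plain UARTs), optionally claim the CTS/RTS
 * flow-control GPIOs for USP ports, map the register window, fetch the
 * IRQ and clock, register the port with the serial core and, last of all,
 * set up the optional RX/TX DMA channels and coherent RX loop buffers.
 */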
1270 static int sirfsoc_uart_probe(struct platform_device *pdev)
1271 {
1272         struct sirfsoc_uart_port *sirfport;
1273         struct uart_port *port;
1274         struct resource *res;
1275         int ret;
1276         int i, j;
1277         struct dma_slave_config slv_cfg = {
1278                 .src_maxburst = 2,
1279         };
1280         struct dma_slave_config tx_slv_cfg = {
1281                 .dst_maxburst = 2,
1282         };
1283         const struct of_device_id *match;
1284
1285         match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node);
1286         if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) {
1287                 dev_err(&pdev->dev,
1288                         "Unable to find cell-index in uart node.\n");
1289                 ret = -EFAULT;
1290                 goto err;
1291         }
1292         if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart"))
1293                 pdev->id += ((struct sirfsoc_uart_register *)
1294                                 match->data)->uart_param.register_uart_nr;
1295         sirfport = &sirfsoc_uart_ports[pdev->id];
1296         port = &sirfport->port;
1297         port->dev = &pdev->dev;
1298         port->private_data = sirfport;
1299         sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;
1300
1301         sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node,
1302                 "sirf,uart-has-rtscts");
1303         if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart"))
1304                 sirfport->uart_reg->uart_type = SIRF_REAL_UART;
1305         if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart")) {
1306                 sirfport->uart_reg->uart_type = SIRF_USP_UART;
1307                 if (!sirfport->hw_flow_ctrl)
1308                         goto usp_no_flow_control;
1309                 if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL))
1310                         sirfport->cts_gpio = of_get_named_gpio(
1311                                         pdev->dev.of_node, "cts-gpios", 0);
1312                 else
1313                         sirfport->cts_gpio = -1;
1314                 if (of_find_property(pdev->dev.of_node, "rts-gpios", NULL))
1315                         sirfport->rts_gpio = of_get_named_gpio(
1316                                         pdev->dev.of_node, "rts-gpios", 0);
1317                 else
1318                         sirfport->rts_gpio = -1;
1319
1320                 if ((!gpio_is_valid(sirfport->cts_gpio) ||
1321                          !gpio_is_valid(sirfport->rts_gpio))) {
1322                         ret = -EINVAL;
1323                         dev_err(&pdev->dev,
1324                                 "USP flow control requires both cts and rts gpios\n");
1325                         goto err;
1326                 }
1327                 ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio,
1328                                 "usp-cts-gpio");
1329                 if (ret) {
1330                         dev_err(&pdev->dev, "Unable to request cts gpio\n");
1331                         goto err;
1332                 }
1333                 gpio_direction_input(sirfport->cts_gpio);
1334                 ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio,
1335                                 "usp-rts-gpio");
1336                 if (ret) {
1337                         dev_err(&pdev->dev, "Unable to request rts gpio\n");
1338                         goto err;
1339                 }
1340                 gpio_direction_output(sirfport->rts_gpio, 1);
1341         }
1342 usp_no_flow_control:
1343         if (of_device_is_compatible(pdev->dev.of_node, "sirf,marco-uart"))
1344                 sirfport->is_marco = true;
1345
1346         if (of_property_read_u32(pdev->dev.of_node,
1347                         "fifosize",
1348                         &port->fifosize)) {
1349                 dev_err(&pdev->dev,
1350                         "Unable to find fifosize in uart node.\n");
1351                 ret = -EFAULT;
1352                 goto err;
1353         }
1354
1355         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1356         if (res == NULL) {
1357                 dev_err(&pdev->dev, "Insufficient resources.\n");
1358                 ret = -EFAULT;
1359                 goto err;
1360         }
1361         tasklet_init(&sirfport->rx_dma_complete_tasklet,
1362                         sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport);
1363         tasklet_init(&sirfport->rx_tmo_process_tasklet,
1364                         sirfsoc_rx_tmo_process_tl, (unsigned long)sirfport);
1365         port->mapbase = res->start;
1366         port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
1367         if (!port->membase) {
1368                 dev_err(&pdev->dev, "Cannot remap resource.\n");
1369                 ret = -ENOMEM;
1370                 goto err;
1371         }
1372         res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1373         if (res == NULL) {
1374                 dev_err(&pdev->dev, "Insufficient resources.\n");
1375                 ret = -EFAULT;
1376                 goto err;
1377         }
1378         port->irq = res->start;
1379
1380         sirfport->clk = clk_get(&pdev->dev, NULL);
1381         if (IS_ERR(sirfport->clk)) {
1382                 ret = PTR_ERR(sirfport->clk);
1383                 goto err;
1384         }
1385         port->uartclk = clk_get_rate(sirfport->clk);
1386
1387         port->ops = &sirfsoc_uart_ops;
1388         spin_lock_init(&port->lock);
1389
1390         platform_set_drvdata(pdev, sirfport);
1391         ret = uart_add_one_port(&sirfsoc_uart_drv, port);
1392         if (ret != 0) {
1393                 dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
1394                 goto port_err;
1395         }
1396
1397         sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx");
1398         for (i = 0; sirfport->rx_dma_chan && i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
1399                 sirfport->rx_dma_items[i].xmit.buf =
1400                         dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
1401                         &sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
1402                 if (!sirfport->rx_dma_items[i].xmit.buf) {
1403                         dev_err(port->dev, "UART rx DMA buffer allocation failed\n");
1404                         ret = -ENOMEM;
1405                         goto alloc_coherent_err;
1406                 }
1407                 sirfport->rx_dma_items[i].xmit.head =
1408                         sirfport->rx_dma_items[i].xmit.tail = 0;
1409         }
1410         if (sirfport->rx_dma_chan)
1411                 dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
1412         sirfport->tx_dma_chan = dma_request_slave_channel(port->dev, "tx");
1413         if (sirfport->tx_dma_chan)
1414                 dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
1415
1416         return 0;
1417 alloc_coherent_err:
1418         for (j = 0; j < i; j++)
1419                 dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
1420                                 sirfport->rx_dma_items[j].xmit.buf,
1421                                 sirfport->rx_dma_items[j].dma_addr);
1422         dma_release_channel(sirfport->rx_dma_chan);
        /* the port was already registered above; detach it on this error path */
        uart_remove_one_port(&sirfsoc_uart_drv, port);
1423 port_err:
1424         clk_put(sirfport->clk);
1425 err:
1426         return ret;
1427 }
1428
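/*
 * Remove: detach the port from the serial core, drop the clock reference,
 * and release any RX/TX DMA channels together with the coherent RX loop
 * buffers allocated at probe time.
 */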
1429 static int sirfsoc_uart_remove(struct platform_device *pdev)
1430 {
1431         struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
1432         struct uart_port *port = &sirfport->port;
1433         uart_remove_one_port(&sirfsoc_uart_drv, port);
1434         clk_put(sirfport->clk);
1435         if (sirfport->rx_dma_chan) {
1436                 int i;
1437                 dmaengine_terminate_all(sirfport->rx_dma_chan);
1438                 dma_release_channel(sirfport->rx_dma_chan);
1439                 for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
1440                         dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
1441                                         sirfport->rx_dma_items[i].xmit.buf,
1442                                         sirfport->rx_dma_items[i].dma_addr);
1443         }
1444         if (sirfport->tx_dma_chan) {
1445                 dmaengine_terminate_all(sirfport->tx_dma_chan);
1446                 dma_release_channel(sirfport->tx_dma_chan);
1447         }
1448         return 0;
1449 }
1450
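/* System sleep hooks defer to the serial core suspend/resume helpers. */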
1451 #ifdef CONFIG_PM_SLEEP
1452 static int
1453 sirfsoc_uart_suspend(struct device *dev)
1454 {
1455         struct sirfsoc_uart_port *sirfport = dev_get_drvdata(dev);
1456         struct uart_port *port = &sirfport->port;
1457         uart_suspend_port(&sirfsoc_uart_drv, port);
1458         return 0;
1459 }
1460
1461 static int sirfsoc_uart_resume(struct device *dev)
1462 {
1463         struct sirfsoc_uart_port *sirfport = dev_get_drvdata(dev);
1464         struct uart_port *port = &sirfport->port;
1465         uart_resume_port(&sirfsoc_uart_drv, port);
1466         return 0;
1467 }
1468 #endif
1469
1470 static const struct dev_pm_ops sirfsoc_uart_pm_ops = {
1471         SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume)
1472 };
1473
1474 static struct platform_driver sirfsoc_uart_driver = {
1475         .probe          = sirfsoc_uart_probe,
1476         .remove         = sirfsoc_uart_remove,
1477         .driver         = {
1478                 .name   = SIRFUART_PORT_NAME,
1479                 .owner  = THIS_MODULE,
1480                 .of_match_table = sirfsoc_uart_ids,
1481                 .pm     = &sirfsoc_uart_pm_ops,
1482         },
1483 };
1484
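/*
 * Register the uart_driver before the platform driver so ports can attach
 * as devices probe; unregister it again if platform registration fails.
 */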
1485 static int __init sirfsoc_uart_init(void)
1486 {
1487         int ret = 0;
1488
1489         ret = uart_register_driver(&sirfsoc_uart_drv);
1490         if (ret)
1491                 goto out;
1492
1493         ret = platform_driver_register(&sirfsoc_uart_driver);
1494         if (ret)
1495                 uart_unregister_driver(&sirfsoc_uart_drv);
1496 out:
1497         return ret;
1498 }
1499 module_init(sirfsoc_uart_init);
1500
1501 static void __exit sirfsoc_uart_exit(void)
1502 {
1503         platform_driver_unregister(&sirfsoc_uart_driver);
1504         uart_unregister_driver(&sirfsoc_uart_drv);
1505 }
1506 module_exit(sirfsoc_uart_exit);
1507
1508 MODULE_LICENSE("GPL v2");
1509 MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang <Rong.Wang@csr.com>");
1510 MODULE_DESCRIPTION("CSR SiRFprimaII UART driver");