HSI: omap_ssi_port: replace pm_runtime_put_sync with non-sync variant
drivers/hsi/controllers/omap_ssi_port.c
/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>

#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"

static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
        return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
        return 0;
}

static inline unsigned int ssi_wakein(struct hsi_port *port)
{
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        return gpiod_get_value(omap_port->wake_gpio);
}

#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

        debugfs_remove_recursive(omap_port->dir);
}

static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused)
{
        struct hsi_port *port = m->private;
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        void __iomem    *base = omap_ssi->sys;
        unsigned int ch;

        pm_runtime_get_sync(omap_port->pdev);
        if (omap_port->wake_irq > 0)
                seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
        seq_printf(m, "WAKE\t\t: 0x%08x\n",
                                readl(base + SSI_WAKE_REG(port->num)));
        seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
                        readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
        seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
                        readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
        /* SST */
        base = omap_port->sst_base;
        seq_puts(m, "\nSST\n===\n");
        seq_printf(m, "ID SST\t\t: 0x%08x\n",
                                readl(base + SSI_SST_ID_REG));
        seq_printf(m, "MODE\t\t: 0x%08x\n",
                                readl(base + SSI_SST_MODE_REG));
        seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
                                readl(base + SSI_SST_FRAMESIZE_REG));
        seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
                                readl(base + SSI_SST_DIVISOR_REG));
        seq_printf(m, "CHANNELS\t: 0x%08x\n",
                                readl(base + SSI_SST_CHANNELS_REG));
        seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
                                readl(base + SSI_SST_ARBMODE_REG));
        seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
                                readl(base + SSI_SST_TXSTATE_REG));
        seq_printf(m, "BUFSTATE\t: 0x%08x\n",
                                readl(base + SSI_SST_BUFSTATE_REG));
        seq_printf(m, "BREAK\t\t: 0x%08x\n",
                                readl(base + SSI_SST_BREAK_REG));
        for (ch = 0; ch < omap_port->channels; ch++) {
                seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
                                readl(base + SSI_SST_BUFFER_CH_REG(ch)));
        }
        /* SSR */
        base = omap_port->ssr_base;
        seq_puts(m, "\nSSR\n===\n");
        seq_printf(m, "ID SSR\t\t: 0x%08x\n",
                                readl(base + SSI_SSR_ID_REG));
        seq_printf(m, "MODE\t\t: 0x%08x\n",
                                readl(base + SSI_SSR_MODE_REG));
        seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
                                readl(base + SSI_SSR_FRAMESIZE_REG));
        seq_printf(m, "CHANNELS\t: 0x%08x\n",
                                readl(base + SSI_SSR_CHANNELS_REG));
        seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
                                readl(base + SSI_SSR_TIMEOUT_REG));
        seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
                                readl(base + SSI_SSR_RXSTATE_REG));
        seq_printf(m, "BUFSTATE\t: 0x%08x\n",
                                readl(base + SSI_SSR_BUFSTATE_REG));
        seq_printf(m, "BREAK\t\t: 0x%08x\n",
                                readl(base + SSI_SSR_BREAK_REG));
        seq_printf(m, "ERROR\t\t: 0x%08x\n",
                                readl(base + SSI_SSR_ERROR_REG));
        seq_printf(m, "ERRORACK\t: 0x%08x\n",
                                readl(base + SSI_SSR_ERRORACK_REG));
        for (ch = 0; ch < omap_port->channels; ch++) {
                seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
                                readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
        }
        pm_runtime_put_sync(omap_port->pdev);

        return 0;
}

static int ssi_port_regs_open(struct inode *inode, struct file *file)
{
        return single_open(file, ssi_debug_port_show, inode->i_private);
}

static const struct file_operations ssi_port_regs_fops = {
        .open           = ssi_port_regs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int ssi_div_get(void *data, u64 *val)
{
        struct hsi_port *port = data;
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

        pm_runtime_get_sync(omap_port->pdev);
        *val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
        pm_runtime_put_sync(omap_port->pdev);

        return 0;
}

static int ssi_div_set(void *data, u64 val)
{
        struct hsi_port *port = data;
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

        if (val > 127)
                return -EINVAL;

        pm_runtime_get_sync(omap_port->pdev);
        writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
        omap_port->sst.divisor = val;
        pm_runtime_put_sync(omap_port->pdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

static int ssi_debug_add_port(struct omap_ssi_port *omap_port,
                                     struct dentry *dir)
{
        struct hsi_port *port = to_hsi_port(omap_port->dev);

        dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
        if (!dir)
                return -ENOMEM;
        omap_port->dir = dir;
        debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
        dir = debugfs_create_dir("sst", dir);
        if (!dir)
                return -ENOMEM;
        debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port,
                            &ssi_sst_div_fops);

        return 0;
}
#endif

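/*
 * Claim a free GDD logical channel for the message. Returns the channel
 * number, or -EBUSY if all SSI_MAX_GDD_LCH channels are in use.
 */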
static int ssi_claim_lch(struct hsi_msg *msg)
{
        struct hsi_port *port = hsi_get_port(msg->cl);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        int lch;

        for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
                if (!omap_ssi->gdd_trn[lch].msg) {
                        omap_ssi->gdd_trn[lch].msg = msg;
                        omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
                        return lch;
                }

        return -EBUSY;
}

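/*
 * Map the message buffer for DMA and program the GDD logical channel:
 * CSDP/CICR, source and destination addresses and the element count,
 * then enable the transfer through the CCR register.
 */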
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
        struct hsi_port *port = hsi_get_port(msg->cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        void __iomem *gdd = omap_ssi->gdd;
        int err;
        u16 csdp;
        u16 ccr;
        u32 s_addr;
        u32 d_addr;
        u32 tmp;

        /* Hold clocks during the transfer */
        pm_runtime_get(omap_port->pdev);

        if (!pm_runtime_active(omap_port->pdev)) {
                dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n");
                pm_runtime_put(omap_port->pdev);
                return -EREMOTEIO;
        }

        if (msg->ttype == HSI_MSG_READ) {
                err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
                                                        DMA_FROM_DEVICE);
                if (err < 0) {
                        dev_dbg(&ssi->device, "DMA map SG failed!\n");
                        pm_runtime_put(omap_port->pdev);
                        return err;
                }
                csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
                        SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
                        SSI_DATA_TYPE_S32;
                ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
                ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
                        SSI_CCR_ENABLE;
                s_addr = omap_port->ssr_dma +
                                        SSI_SSR_BUFFER_CH_REG(msg->channel);
                d_addr = sg_dma_address(msg->sgt.sgl);
        } else {
                err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
                                                        DMA_TO_DEVICE);
                if (err < 0) {
                        dev_dbg(&ssi->device, "DMA map SG failed!\n");
                        pm_runtime_put(omap_port->pdev);
                        return err;
                }
                csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
                        SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
                        SSI_DATA_TYPE_S32;
                ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
                ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
                        SSI_CCR_ENABLE;
                s_addr = sg_dma_address(msg->sgt.sgl);
                d_addr = omap_port->sst_dma +
                                        SSI_SST_BUFFER_CH_REG(msg->channel);
        }
        dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
                lch, csdp, ccr, s_addr, d_addr);

        writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
        writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
        writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
        writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
        writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
                                                gdd + SSI_GDD_CEN_REG(lch));

        spin_lock_bh(&omap_ssi->lock);
        tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
        tmp |= SSI_GDD_LCH(lch);
        writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
        spin_unlock_bh(&omap_ssi->lock);
        writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
        msg->status = HSI_STATUS_PROCEEDING;

        return 0;
}

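/*
 * Start a PIO transfer by arming the channel's DATAACCEPT (write) or
 * DATAAVAILABLE (read) interrupt; the data itself is moved word by word
 * in ssi_pio_complete().
 */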
static int ssi_start_pio(struct hsi_msg *msg)
{
        struct hsi_port *port = hsi_get_port(msg->cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        u32 val;

        pm_runtime_get(omap_port->pdev);

        if (!pm_runtime_active(omap_port->pdev)) {
                dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n");
                pm_runtime_put(omap_port->pdev);
                return -EREMOTEIO;
        }

        if (msg->ttype == HSI_MSG_WRITE) {
                val = SSI_DATAACCEPT(msg->channel);
                /* Hold clocks for pio writes */
                pm_runtime_get(omap_port->pdev);
        } else {
                val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
        }
        dev_dbg(&port->device, "Single %s transfer\n",
                                                msg->ttype ? "write" : "read");
        val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        pm_runtime_put(omap_port->pdev);
        msg->actual_len = 0;
        msg->status = HSI_STATUS_PROCEEDING;

        return 0;
}

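/*
 * Start the first queued message on @queue: use DMA when the buffer is
 * larger than one 32-bit word and a GDD channel can be claimed,
 * otherwise fall back to PIO.
 */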
static int ssi_start_transfer(struct list_head *queue)
{
        struct hsi_msg *msg;
        int lch = -1;

        if (list_empty(queue))
                return 0;
        msg = list_first_entry(queue, struct hsi_msg, link);
        if (msg->status != HSI_STATUS_QUEUED)
                return 0;
        if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
                lch = ssi_claim_lch(msg);
        if (lch >= 0)
                return ssi_start_dma(msg, lch);
        else
                return ssi_start_pio(msg);
}

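/*
 * Handle break frames, which are only valid in FRAME mode: transmit a
 * break for writes, or arm break detection and queue the message for
 * reads.
 */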
static int ssi_async_break(struct hsi_msg *msg)
{
        struct hsi_port *port = hsi_get_port(msg->cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        int err = 0;
        u32 tmp;

        pm_runtime_get_sync(omap_port->pdev);
        if (msg->ttype == HSI_MSG_WRITE) {
                if (omap_port->sst.mode != SSI_MODE_FRAME) {
                        err = -EINVAL;
                        goto out;
                }
                writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
                msg->status = HSI_STATUS_COMPLETED;
                msg->complete(msg);
        } else {
                if (omap_port->ssr.mode != SSI_MODE_FRAME) {
                        err = -EINVAL;
                        goto out;
                }
                spin_lock_bh(&omap_port->lock);
                tmp = readl(omap_ssi->sys +
                                        SSI_MPU_ENABLE_REG(port->num, 0));
                writel(tmp | SSI_BREAKDETECTED,
                        omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
                msg->status = HSI_STATUS_PROCEEDING;
                list_add_tail(&msg->link, &omap_port->brkqueue);
                spin_unlock_bh(&omap_port->lock);
        }
out:
        pm_runtime_put(omap_port->pdev);

        return err;
}

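/*
 * Queue a message on the matching TX/RX channel queue and try to start
 * the transfer right away if the channel is idle.
 */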
static int ssi_async(struct hsi_msg *msg)
{
        struct hsi_port *port = hsi_get_port(msg->cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct list_head *queue;
        int err = 0;

        BUG_ON(!msg);

        if (msg->sgt.nents > 1)
                return -ENOSYS; /* TODO: Add sg support */

        if (msg->break_frame)
                return ssi_async_break(msg);

        if (msg->ttype) {
                BUG_ON(msg->channel >= omap_port->sst.channels);
                queue = &omap_port->txqueue[msg->channel];
        } else {
                BUG_ON(msg->channel >= omap_port->ssr.channels);
                queue = &omap_port->rxqueue[msg->channel];
        }
        msg->status = HSI_STATUS_QUEUED;

        pm_runtime_get_sync(omap_port->pdev);
        spin_lock_bh(&omap_port->lock);
        list_add_tail(&msg->link, queue);
        err = ssi_start_transfer(queue);
        if (err < 0) {
                list_del(&msg->link);
                msg->status = HSI_STATUS_ERROR;
        }
        spin_unlock_bh(&omap_port->lock);
        pm_runtime_put(omap_port->pdev);
        dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
                                msg->status, msg->ttype, msg->channel);

        return err;
}

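/*
 * Compute the TX divisor for the requested maximum speed. The SSI TX
 * clock runs at half the functional clock; the decrement makes exact
 * multiples round down.
 */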
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        u32 tx_fckrate = (u32) omap_ssi->fck_rate;

        /* / 2 : SSI TX clock is always half of the SSI functional clock */
        tx_fckrate >>= 1;
        /* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
        tx_fckrate--;
        dev_dbg(&ssi->device, "TX div %d for fck_rate %lu kHz speed %d kb/s\n",
                tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
                omap_ssi->max_speed);

        return tx_fckrate / omap_ssi->max_speed;
}

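/*
 * Drop all queued messages, or only those belonging to @cl when given,
 * calling each message's destructor (or freeing it) on the way out.
 */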
static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
        struct list_head *node, *tmp;
        struct hsi_msg *msg;

        list_for_each_safe(node, tmp, queue) {
                msg = list_entry(node, struct hsi_msg, link);
                if ((cl) && (cl != msg->cl))
                        continue;
                list_del(node);
                pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
                        msg->channel, msg, msg->sgt.sgl->length,
                                        msg->ttype, msg->context);
                if (msg->destructor)
                        msg->destructor(msg);
                else
                        hsi_free_msg(msg);
        }
}

static int ssi_setup(struct hsi_client *cl)
{
        struct hsi_port *port = to_hsi_port(cl->device.parent);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        void __iomem *sst = omap_port->sst_base;
        void __iomem *ssr = omap_port->ssr_base;
        u32 div;
        u32 val;
        int err = 0;

        pm_runtime_get_sync(omap_port->pdev);
        spin_lock_bh(&omap_port->lock);
        if (cl->tx_cfg.speed)
                omap_ssi->max_speed = cl->tx_cfg.speed;
        div = ssi_calculate_div(ssi);
        if (div > SSI_MAX_DIVISOR) {
                dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
                                                cl->tx_cfg.speed, div);
                err = -EINVAL;
                goto out;
        }
        /* Set TX/RX module to sleep to stop TX/RX during cfg update */
        writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
        writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
        /* Flush posted write */
        val = readl(ssr + SSI_SSR_MODE_REG);
        /* TX */
        writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
        writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
        writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
        writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
        writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
        /* RX */
        writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
        writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
        writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
        /* Cleanup the break queue if we leave FRAME mode */
        if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
                (cl->rx_cfg.mode != SSI_MODE_FRAME))
                ssi_flush_queue(&omap_port->brkqueue, cl);
        writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
        omap_port->channels = max(cl->rx_cfg.num_hw_channels,
                                  cl->tx_cfg.num_hw_channels);
        /* Shadow registering for OFF mode */
        /* SST */
        omap_port->sst.divisor = div;
        omap_port->sst.frame_size = 31;
        omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
        omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
        omap_port->sst.mode = cl->tx_cfg.mode;
        /* SSR */
        omap_port->ssr.frame_size = 31;
        omap_port->ssr.timeout = 0;
        omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
        omap_port->ssr.mode = cl->rx_cfg.mode;
out:
        spin_unlock_bh(&omap_port->lock);
        pm_runtime_put(omap_port->pdev);

        return err;
}

static int ssi_flush(struct hsi_client *cl)
{
        struct hsi_port *port = hsi_get_port(cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        struct hsi_msg *msg;
        void __iomem *sst = omap_port->sst_base;
        void __iomem *ssr = omap_port->ssr_base;
        unsigned int i;
        u32 err;

        pm_runtime_get_sync(omap_port->pdev);
        spin_lock_bh(&omap_port->lock);

        /* stop all ssi communication */
        pinctrl_pm_select_idle_state(omap_port->pdev);
        udelay(1); /* wait for racing frames */

        /* Stop all DMA transfers */
        for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
                msg = omap_ssi->gdd_trn[i].msg;
                if (!msg || (port != hsi_get_port(msg->cl)))
                        continue;
                writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
                if (msg->ttype == HSI_MSG_READ)
                        pm_runtime_put(omap_port->pdev);
                omap_ssi->gdd_trn[i].msg = NULL;
        }
        /* Flush all SST buffers */
        writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
        writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
        /* Flush all SSR buffers */
        writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
        writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
        /* Flush all errors */
        err = readl(ssr + SSI_SSR_ERROR_REG);
        writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
        /* Flush break */
        writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
        /* Clear interrupts */
        writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        writel_relaxed(0xffffff00,
                        omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
        writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
        writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
        /* Dequeue all pending requests */
        for (i = 0; i < omap_port->channels; i++) {
                /* Release write clocks */
                if (!list_empty(&omap_port->txqueue[i]))
                        pm_runtime_put(omap_port->pdev);
                ssi_flush_queue(&omap_port->txqueue[i], NULL);
                ssi_flush_queue(&omap_port->rxqueue[i], NULL);
        }
        ssi_flush_queue(&omap_port->brkqueue, NULL);

        /* Resume SSI communication */
        pinctrl_pm_select_default_state(omap_port->pdev);

        spin_unlock_bh(&omap_port->lock);
        pm_runtime_put(omap_port->pdev);

        return 0;
}

static void start_tx_work(struct work_struct *work)
{
        struct omap_ssi_port *omap_port =
                                container_of(work, struct omap_ssi_port, work);
        struct hsi_port *port = to_hsi_port(omap_port->dev);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

        pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
        writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
}

static int ssi_start_tx(struct hsi_client *cl)
{
        struct hsi_port *port = hsi_get_port(cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

        dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

        spin_lock_bh(&omap_port->wk_lock);
        if (omap_port->wk_refcount++) {
                spin_unlock_bh(&omap_port->wk_lock);
                return 0;
        }
        spin_unlock_bh(&omap_port->wk_lock);

        schedule_work(&omap_port->work);

        return 0;
}

static int ssi_stop_tx(struct hsi_client *cl)
{
        struct hsi_port *port = hsi_get_port(cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

        dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

        spin_lock_bh(&omap_port->wk_lock);
        BUG_ON(!omap_port->wk_refcount);
        if (--omap_port->wk_refcount) {
                spin_unlock_bh(&omap_port->wk_lock);
                return 0;
        }
        writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
        spin_unlock_bh(&omap_port->wk_lock);

        pm_runtime_put(omap_port->pdev); /* Release clocks */

        return 0;
}

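/*
 * Restart transfers on @queue, completing messages with an error status
 * until one of them starts successfully or the queue drains.
 */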
static void ssi_transfer(struct omap_ssi_port *omap_port,
                                                        struct list_head *queue)
{
        struct hsi_msg *msg;
        int err = -1;

        spin_lock_bh(&omap_port->lock);
        while (err < 0) {
                err = ssi_start_transfer(queue);
                if (err < 0) {
                        msg = list_first_entry(queue, struct hsi_msg, link);
                        msg->status = HSI_STATUS_ERROR;
                        msg->actual_len = 0;
                        list_del(&msg->link);
                        spin_unlock_bh(&omap_port->lock);
                        msg->complete(msg);
                        spin_lock_bh(&omap_port->lock);
                }
        }
        spin_unlock_bh(&omap_port->lock);
}

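/*
 * Flush all queues owned by @cl, releasing the clock references held by
 * in-flight writes and disarming/acking the interrupts that belonged to
 * the flushed transfers.
 */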
static void ssi_cleanup_queues(struct hsi_client *cl)
{
        struct hsi_port *port = hsi_get_port(cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        struct hsi_msg *msg;
        unsigned int i;
        u32 rxbufstate = 0;
        u32 txbufstate = 0;
        u32 status = SSI_ERROROCCURED;
        u32 tmp;

        ssi_flush_queue(&omap_port->brkqueue, cl);
        if (list_empty(&omap_port->brkqueue))
                status |= SSI_BREAKDETECTED;

        for (i = 0; i < omap_port->channels; i++) {
                if (list_empty(&omap_port->txqueue[i]))
                        continue;
                msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
                                                                        link);
                if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
                        txbufstate |= (1 << i);
                        status |= SSI_DATAACCEPT(i);
                        /* Release the clock references for writes, GDD ones included */
                        pm_runtime_put(omap_port->pdev);
                }
                ssi_flush_queue(&omap_port->txqueue[i], cl);
        }
        for (i = 0; i < omap_port->channels; i++) {
                if (list_empty(&omap_port->rxqueue[i]))
                        continue;
                msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
                                                                        link);
                if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
                        rxbufstate |= (1 << i);
                        status |= SSI_DATAAVAILABLE(i);
                }
                ssi_flush_queue(&omap_port->rxqueue[i], cl);
                /* Check if we keep the error detection interrupt armed */
                if (!list_empty(&omap_port->rxqueue[i]))
                        status &= ~SSI_ERROROCCURED;
        }
        /* Cleanup write buffers */
        tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
        tmp &= ~txbufstate;
        writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
        /* Cleanup read buffers */
        tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
        tmp &= ~rxbufstate;
        writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
        /* Disarm and ack pending interrupts */
        tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        tmp &= ~status;
        writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        writel_relaxed(status, omap_ssi->sys +
                SSI_MPU_STATUS_REG(port->num, 0));
}

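/*
 * Abort all pending GDD (DMA) transfers belonging to @cl and clear the
 * matching GDD interrupt enable and status bits.
 */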
static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        struct hsi_port *port = hsi_get_port(cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_msg *msg;
        unsigned int i;
        u32 val = 0;
        u32 tmp;

        for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
                msg = omap_ssi->gdd_trn[i].msg;
                if ((!msg) || (msg->cl != cl))
                        continue;
                writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
                val |= (1 << i);
                /*
                 * Clock references for write will be handled in
                 * ssi_cleanup_queues
                 */
                if (msg->ttype == HSI_MSG_READ)
                        pm_runtime_put(omap_port->pdev);
                omap_ssi->gdd_trn[i].msg = NULL;
        }
        tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
        tmp &= ~val;
        writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
        writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
        writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
        writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
        /* OCP barrier */
        mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

        return 0;
}

static int ssi_release(struct hsi_client *cl)
{
        struct hsi_port *port = hsi_get_port(cl);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

        pm_runtime_get_sync(omap_port->pdev);
        spin_lock_bh(&omap_port->lock);
        /* Stop all the pending DMA requests for that client */
        ssi_cleanup_gdd(ssi, cl);
        /* Now cleanup all the queues */
        ssi_cleanup_queues(cl);
        /* If it is the last client of the port, do extra checks and cleanup */
        if (port->claimed <= 1) {
                /*
                 * Drop the clock reference for the incoming wake line
                 * if it is still kept high by the other side.
                 */
                if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
                        pm_runtime_put_sync(omap_port->pdev);
                pm_runtime_get(omap_port->pdev);
                /* Stop any SSI TX/RX without a client */
                ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
                omap_port->sst.mode = SSI_MODE_SLEEP;
                omap_port->ssr.mode = SSI_MODE_SLEEP;
                pm_runtime_put(omap_port->pdev);
                WARN_ON(omap_port->wk_refcount != 0);
        }
        spin_unlock_bh(&omap_port->lock);
        pm_runtime_put(omap_port->pdev);

        return 0;
}

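/*
 * Handle an SSI error interrupt: cancel all GDD and PIO read transfers,
 * acknowledge the error and complete the pending read requests with
 * HSI_STATUS_ERROR before restarting any queued reads.
 */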
static void ssi_error(struct hsi_port *port)
{
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        struct hsi_msg *msg;
        unsigned int i;
        u32 err;
        u32 val;
        u32 tmp;

        /* ACK error */
        err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
        dev_err(&port->device, "SSI error: 0x%02x\n", err);
        if (!err) {
                dev_dbg(&port->device, "spurious SSI error ignored!\n");
                return;
        }
        spin_lock(&omap_ssi->lock);
        /* Cancel all GDD read transfers */
        for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
                msg = omap_ssi->gdd_trn[i].msg;
                if ((msg) && (msg->ttype == HSI_MSG_READ)) {
                        writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
                        val |= (1 << i);
                        omap_ssi->gdd_trn[i].msg = NULL;
                }
        }
        tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
        tmp &= ~val;
        writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
        spin_unlock(&omap_ssi->lock);
        /* Cancel all PIO read transfers */
        spin_lock(&omap_port->lock);
        tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
        writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        /* ACK error */
        writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
        writel_relaxed(SSI_ERROROCCURED,
                        omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
        /* Signal the error to all current pending read requests */
        for (i = 0; i < omap_port->channels; i++) {
                if (list_empty(&omap_port->rxqueue[i]))
                        continue;
                msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
                                                                        link);
                list_del(&msg->link);
                msg->status = HSI_STATUS_ERROR;
                spin_unlock(&omap_port->lock);
                msg->complete(msg);
                /* Now restart queued reads if any */
                ssi_transfer(omap_port, &omap_port->rxqueue[i]);
                spin_lock(&omap_port->lock);
        }
        spin_unlock(&omap_port->lock);
}

static void ssi_break_complete(struct hsi_port *port)
{
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        struct hsi_msg *msg;
        struct hsi_msg *tmp;
        u32 val;

        dev_dbg(&port->device, "HWBREAK received\n");

        spin_lock(&omap_port->lock);
        val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        val &= ~SSI_BREAKDETECTED;
        writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
        writel(SSI_BREAKDETECTED,
                        omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
        spin_unlock(&omap_port->lock);

        list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
                msg->status = HSI_STATUS_COMPLETED;
                spin_lock(&omap_port->lock);
                list_del(&msg->link);
                spin_unlock(&omap_port->lock);
                msg->complete(msg);
        }
}

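/*
 * Move one 32-bit word per interrupt for a PIO transfer and complete
 * the message once the whole buffer is transferred (for writes, only
 * after the last frame has actually gone out on the wire).
 */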
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_msg *msg;
        u32 *buf;
        u32 reg;
        u32 val;

        spin_lock_bh(&omap_port->lock);
        msg = list_first_entry(queue, struct hsi_msg, link);
        if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
                msg->actual_len = 0;
                msg->status = HSI_STATUS_PENDING;
        }
        if (msg->ttype == HSI_MSG_WRITE)
                val = SSI_DATAACCEPT(msg->channel);
        else
                val = SSI_DATAAVAILABLE(msg->channel);
        if (msg->status == HSI_STATUS_PROCEEDING) {
                buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
                if (msg->ttype == HSI_MSG_WRITE)
                        writel(*buf, omap_port->sst_base +
                                        SSI_SST_BUFFER_CH_REG(msg->channel));
                else
                        *buf = readl(omap_port->ssr_base +
                                        SSI_SSR_BUFFER_CH_REG(msg->channel));
                dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
                                                        msg->ttype, *buf);
                msg->actual_len += sizeof(*buf);
                if (msg->actual_len >= msg->sgt.sgl->length)
                        msg->status = HSI_STATUS_COMPLETED;
                /*
                 * Wait for the last written frame to be really sent before
                 * we call the complete callback
                 */
                if ((msg->status == HSI_STATUS_PROCEEDING) ||
                                ((msg->status == HSI_STATUS_COMPLETED) &&
                                        (msg->ttype == HSI_MSG_WRITE))) {
                        writel(val, omap_ssi->sys +
                                        SSI_MPU_STATUS_REG(port->num, 0));
                        spin_unlock_bh(&omap_port->lock);

                        return;
                }
        }
        /* Transfer completed at this point */
        reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        if (msg->ttype == HSI_MSG_WRITE) {
                /* Release clocks for write transfer */
                pm_runtime_put(omap_port->pdev);
        }
        reg &= ~val;
        writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
        writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
        list_del(&msg->link);
        spin_unlock_bh(&omap_port->lock);
        msg->complete(msg);
        ssi_transfer(omap_port, queue);
}

static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
{
        struct hsi_port *port = (struct hsi_port *)ssi_port;
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        void __iomem *sys = omap_ssi->sys;
        unsigned int ch;
        u32 status_reg;

        pm_runtime_get_sync(omap_port->pdev);

        do {
                status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
                status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

                for (ch = 0; ch < omap_port->channels; ch++) {
                        if (status_reg & SSI_DATAACCEPT(ch))
                                ssi_pio_complete(port, &omap_port->txqueue[ch]);
                        if (status_reg & SSI_DATAAVAILABLE(ch))
                                ssi_pio_complete(port, &omap_port->rxqueue[ch]);
                }
                if (status_reg & SSI_BREAKDETECTED)
                        ssi_break_complete(port);
                if (status_reg & SSI_ERROROCCURED)
                        ssi_error(port);

                status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
                status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

                /* TODO: sleep if we retry? */
        } while (status_reg);

        pm_runtime_put(omap_port->pdev);
        return IRQ_HANDLED;
}

static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
{
        struct hsi_port *port = (struct hsi_port *)ssi_port;
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

        if (ssi_wakein(port)) {
                /*
                 * We can have a quick High-Low-High transition in the line.
                 * In such a case if we have long interrupt latencies,
                 * we can miss the low event or get the high event twice.
                 * This workaround will avoid breaking the clock reference
                 * count when such a situation occurs.
                 */
                if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags))
                        pm_runtime_get_sync(omap_port->pdev);
                dev_dbg(&ssi->device, "Wake in high\n");
                if (omap_port->wktest) { /* FIXME: HACK! To be removed */
                        writel(SSI_WAKE(0),
                                omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
                }
                hsi_event(port, HSI_EVENT_START_RX);
        } else {
                dev_dbg(&ssi->device, "Wake in low\n");
                if (omap_port->wktest) { /* FIXME: HACK! To be removed */
                        writel(SSI_WAKE(0),
                                omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
                }
                hsi_event(port, HSI_EVENT_STOP_RX);
                if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
                        pm_runtime_put_sync(omap_port->pdev);
        }

        return IRQ_HANDLED;
}

static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd)
{
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        int err;

        err = platform_get_irq(pd, 0);
        if (err < 0) {
                dev_err(&port->device, "Port IRQ resource missing\n");
                return err;
        }
        omap_port->irq = err;
        err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL,
                                ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port);
        if (err < 0)
                dev_err(&port->device, "Request IRQ %d failed (%d)\n",
                                                        omap_port->irq, err);
        return err;
}

static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd)
{
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        int cawake_irq;
        int err;

        if (!omap_port->wake_gpio) {
                omap_port->wake_irq = -1;
                return 0;
        }

        cawake_irq = gpiod_to_irq(omap_port->wake_gpio);
        omap_port->wake_irq = cawake_irq;

        err = devm_request_threaded_irq(&port->device, cawake_irq, NULL,
                ssi_wake_thread,
                IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                "SSI cawake", port);
        if (err < 0)
                dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
                                                cawake_irq, err);
        err = enable_irq_wake(cawake_irq);
        if (err < 0)
                dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
                        cawake_irq, err);

        return err;
}

static void ssi_queues_init(struct omap_ssi_port *omap_port)
{
        unsigned int ch;

        for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
                INIT_LIST_HEAD(&omap_port->txqueue[ch]);
                INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
        }
        INIT_LIST_HEAD(&omap_port->brkqueue);
}

static int ssi_port_get_iomem(struct platform_device *pd,
                const char *name, void __iomem **pbase, dma_addr_t *phy)
{
        struct hsi_port *port = platform_get_drvdata(pd);
        struct resource *mem;
        struct resource *ioarea;
        void __iomem *base;

        mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
        if (!mem) {
                dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
                return -ENXIO;
        }
        ioarea = devm_request_mem_region(&port->device, mem->start,
                                        resource_size(mem), dev_name(&pd->dev));
        if (!ioarea) {
                dev_err(&pd->dev, "%s IO memory region request failed\n",
                                                                mem->name);
                return -ENXIO;
        }
        base = devm_ioremap(&port->device, mem->start, resource_size(mem));
        if (!base) {
                dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
                return -ENXIO;
        }
        *pbase = base;

        if (phy)
                *phy = mem->start;

        return 0;
}

static int ssi_port_probe(struct platform_device *pd)
{
        struct device_node *np = pd->dev.of_node;
        struct hsi_port *port;
        struct omap_ssi_port *omap_port;
        struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        struct gpio_desc *cawake_gpio = NULL;
        u32 port_id;
        int err;

        dev_dbg(&pd->dev, "init ssi port...\n");

        if (!ssi->port || !omap_ssi->port) {
                dev_err(&pd->dev, "ssi controller not initialized!\n");
                err = -ENODEV;
                goto error;
        }

        /* get id of first uninitialized port in controller */
        for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
                port_id++)
                ;

        if (port_id >= ssi->num_ports) {
                dev_err(&pd->dev, "port id out of range!\n");
                err = -ENODEV;
                goto error;
        }

        port = ssi->port[port_id];

        if (!np) {
                dev_err(&pd->dev, "missing device tree data\n");
                err = -EINVAL;
                goto error;
        }

        cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
        if (IS_ERR(cawake_gpio)) {
                err = PTR_ERR(cawake_gpio);
                dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err);
                goto error;
        }

        omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
        if (!omap_port) {
                err = -ENOMEM;
                goto error;
        }
        omap_port->wake_gpio = cawake_gpio;
        omap_port->pdev = &pd->dev;
        omap_port->port_id = port_id;

        INIT_WORK(&omap_port->work, start_tx_work);

        /* initialize HSI port */
        port->async     = ssi_async;
        port->setup     = ssi_setup;
        port->flush     = ssi_flush;
        port->start_tx  = ssi_start_tx;
        port->stop_tx   = ssi_stop_tx;
        port->release   = ssi_release;
        hsi_port_set_drvdata(port, omap_port);
        omap_ssi->port[port_id] = omap_port;

        platform_set_drvdata(pd, port);

        err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
                &omap_port->sst_dma);
        if (err < 0)
                goto error;
        err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
                &omap_port->ssr_dma);
        if (err < 0)
                goto error;

        err = ssi_port_irq(port, pd);
        if (err < 0)
                goto error;
        err = ssi_wake_irq(port, pd);
        if (err < 0)
                goto error;

        ssi_queues_init(omap_port);
        spin_lock_init(&omap_port->lock);
        spin_lock_init(&omap_port->wk_lock);
        omap_port->dev = &port->device;

        pm_runtime_irq_safe(omap_port->pdev);
        pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
        err = ssi_debug_add_port(omap_port, omap_ssi->dir);
        if (err < 0) {
                pm_runtime_disable(omap_port->pdev);
                goto error;
        }
#endif

        hsi_add_clients_from_dt(port, np);

        dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id);

        return 0;

error:
        return err;
}

static int ssi_port_remove(struct platform_device *pd)
{
        struct hsi_port *port = platform_get_drvdata(pd);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
        ssi_debug_remove_port(port);
#endif

        hsi_port_unregister_clients(port);

        port->async     = hsi_dummy_msg;
        port->setup     = hsi_dummy_cl;
        port->flush     = hsi_dummy_cl;
        port->start_tx  = hsi_dummy_cl;
        port->stop_tx   = hsi_dummy_cl;
        port->release   = hsi_dummy_cl;

        omap_ssi->port[omap_port->port_id] = NULL;
        platform_set_drvdata(pd, NULL);
        pm_runtime_disable(&pd->dev);

        return 0;
}

static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
        writel_relaxed(omap_port->sst.divisor,
                                omap_port->sst_base + SSI_SST_DIVISOR_REG);

        return 0;
}

void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
                               struct omap_ssi_port *omap_port)
{
        /* update divisor */
        u32 div = ssi_calculate_div(ssi);

        omap_port->sst.divisor = div;
        ssi_restore_divisor(omap_port);
}

#ifdef CONFIG_PM
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
        struct hsi_port *port = to_hsi_port(omap_port->dev);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

        omap_port->sys_mpu_enable = readl(omap_ssi->sys +
                                        SSI_MPU_ENABLE_REG(port->num, 0));

        return 0;
}

static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
        struct hsi_port *port = to_hsi_port(omap_port->dev);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
        void __iomem    *base;

        writel_relaxed(omap_port->sys_mpu_enable,
                        omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

        /* SST context */
        base = omap_port->sst_base;
        writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
        writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
        writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

        /* SSR context */
        base = omap_port->ssr_base;
        writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
        writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
        writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

        return 0;
}

static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
        u32 mode;

        writel_relaxed(omap_port->sst.mode,
                                omap_port->sst_base + SSI_SST_MODE_REG);
        writel_relaxed(omap_port->ssr.mode,
                                omap_port->ssr_base + SSI_SSR_MODE_REG);
        /* OCP barrier */
        mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

        return 0;
}

static int omap_ssi_port_runtime_suspend(struct device *dev)
{
        struct hsi_port *port = dev_get_drvdata(dev);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

        dev_dbg(dev, "port runtime suspend!\n");

        ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
        if (omap_ssi->get_loss)
                omap_port->loss_count =
                                omap_ssi->get_loss(ssi->device.parent);
        ssi_save_port_ctx(omap_port);

        return 0;
}

static int omap_ssi_port_runtime_resume(struct device *dev)
{
        struct hsi_port *port = dev_get_drvdata(dev);
        struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
        struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
        struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

        dev_dbg(dev, "port runtime resume!\n");

        if ((omap_ssi->get_loss) && (omap_port->loss_count ==
                                omap_ssi->get_loss(ssi->device.parent)))
                goto mode; /* We always need to restore the mode & TX divisor */

        ssi_restore_port_ctx(omap_port);

mode:
        ssi_restore_divisor(omap_port);
        ssi_restore_port_mode(omap_port);

        return 0;
}

static const struct dev_pm_ops omap_ssi_port_pm_ops = {
        SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
                omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS     (&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS     NULL
#endif

#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
        { .compatible = "ti,omap3-ssi-port", },
        {},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif

struct platform_driver ssi_port_pdriver = {
        .probe = ssi_port_probe,
        .remove = ssi_port_remove,
        .driver = {
                .name   = "omap_ssi_port",
                .of_match_table = omap_ssi_port_of_match,
                .pm     = DEV_PM_OPS,
        },
};