/*
 * netup_unidvb_core.c
 *
 * Main module for NetUP Universal Dual DVB-CI
 *
 * Copyright (C) 2014 NetUP Inc.
 * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
 * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "netup_unidvb.h"
#include "cxd2841er.h"
#include "horus3a.h"
#include "ascot2e.h"
#include "lnbh25.h"

static int spi_enable;
module_param(spi_enable, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
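/* The SPI flash controller is initialized only when spi_enable is set;
 * netup_unidvb_initdev() forces it on when old firmware is detected. */
MODULE_PARM_DESC(spi_enable,
	"Enable SPI flash controller (forced on when old firmware is detected)");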

MODULE_DESCRIPTION("Driver for NetUP Dual Universal DVB CI PCIe card");
MODULE_AUTHOR("info@netup.ru");
MODULE_VERSION(NETUP_UNIDVB_VERSION);
MODULE_LICENSE("GPL");

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/* Avalon-MM PCI-E registers */
#define AVL_PCIE_IENR		0x50
#define AVL_PCIE_ISR		0x40
#define AVL_IRQ_ENABLE		0x80
#define AVL_IRQ_ASSERTED	0x80
/* GPIO registers */
#define GPIO_REG_IO		0x4880
#define GPIO_REG_IO_TOGGLE	0x4882
#define GPIO_REG_IO_SET		0x4884
#define GPIO_REG_IO_CLEAR	0x4886
/* GPIO bits */
#define GPIO_FEA_RESET		(1 << 0)
#define GPIO_FEB_RESET		(1 << 1)
#define GPIO_RFA_CTL		(1 << 2)
#define GPIO_RFB_CTL		(1 << 3)
#define GPIO_FEA_TU_RESET	(1 << 4)
#define GPIO_FEB_TU_RESET	(1 << 5)
/* DMA base address */
#define NETUP_DMA0_ADDR		0x4900
#define NETUP_DMA1_ADDR		0x4940
/* 8 DMA blocks * 128 packets * 188 bytes */
#define NETUP_DMA_BLOCKS_COUNT	8
#define NETUP_DMA_PACKETS_COUNT	128
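/*
 * Ring geometry, worked out from the constants above: one DMA ring is
 * 8 blocks * 128 packets * 188 bytes = 192512 bytes, and
 * netup_unidvb_initdev() allocates twice that so each of the two
 * channels gets its own ring.
 */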
/* DMA status bits */
#define BIT_DMA_RUN		1
#define BIT_DMA_ERROR		2
#define BIT_DMA_IRQ		0x200

/**
 * struct netup_dma_regs - the map of DMA module registers
 * @ctrlstat_set:	Control register, write to set control bits
 * @ctrlstat_clear:	Control register, write to clear control bits
 * @start_addr_lo:	DMA ring buffer start address, lower part
 * @start_addr_hi:	DMA ring buffer start address, higher part
 * @size:		DMA ring buffer size register
 *			Bits [0-7]:	DMA packet size, 188 bytes
 *			Bits [16-23]:	packets count in block, 128 packets
 *			Bits [24-31]:	blocks count, 8 blocks
 * @timeout:		DMA timeout in units of 8ns
 *			For example, a value of 375000000 equals 3 sec
 * @curr_addr_lo:	Current ring buffer head address, lower part
 * @curr_addr_hi:	Current ring buffer head address, higher part
 * @stat_pkt_received:	Statistic register, not tested
 * @stat_pkt_accepted:	Statistic register, not tested
 * @stat_pkt_overruns:	Statistic register, not tested
 * @stat_pkt_underruns:	Statistic register, not tested
 * @stat_fifo_overruns:	Statistic register, not tested
 */
struct netup_dma_regs {
	__le32	ctrlstat_set;
	__le32	ctrlstat_clear;
	__le32	start_addr_lo;
	__le32	start_addr_hi;
	__le32	size;
	__le32	timeout;
	__le32	curr_addr_lo;
	__le32	curr_addr_hi;
	__le32	stat_pkt_received;
	__le32	stat_pkt_accepted;
	__le32	stat_pkt_overruns;
	__le32	stat_pkt_underruns;
	__le32	stat_fifo_overruns;
} __packed __aligned(1);
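
/*
 * Illustrative values, derived from netup_unidvb_dma_init() below (the only
 * writer of these registers): size is programmed as
 * (NETUP_DMA_BLOCKS_COUNT << 24) | (NETUP_DMA_PACKETS_COUNT << 8) | 188,
 * i.e. (8 << 24) | (128 << 8) | 188 = 0x080080bc, and timeout as
 * 375000000, i.e. 375000000 * 8 ns = 3 s.
 */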

struct netup_unidvb_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head	list;
	u32			size;
};

static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc);
static void netup_unidvb_queue_cleanup(struct netup_dma *dma);

static struct cxd2841er_config demod_config = {
	.i2c_addr = 0xc8
};

static struct horus3a_config horus3a_conf = {
	.i2c_address = 0xc0,
	.xtal_freq_mhz = 16,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct ascot2e_config ascot2e_conf = {
	.i2c_address = 0xc2,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct lnbh25_config lnbh25_conf = {
	.i2c_address = 0x10,
	.data2_config = LNBH25_TEN | LNBH25_EXTM
};

static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc)
{
	u8 reg, mask;
	struct netup_dma *dma = priv;
	struct netup_unidvb_dev *ndev;

	if (!priv)
		return -EINVAL;
	ndev = dma->ndev;
	dev_dbg(&ndev->pci_dev->dev, "%s(): num %d is_dvb_tc %d\n",
		__func__, dma->num, is_dvb_tc);
	reg = readb(ndev->bmmio0 + GPIO_REG_IO);
	mask = (dma->num == 0) ? GPIO_RFA_CTL : GPIO_RFB_CTL;
	if (!is_dvb_tc)
		reg |= mask;
	else
		reg &= ~mask;
	writeb(reg, ndev->bmmio0 + GPIO_REG_IO);
	return 0;
}

static void netup_unidvb_dev_enable(struct netup_unidvb_dev *ndev)
{
	u16 gpio_reg;

	/* enable PCI-E interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	/* assert reset on frontends and tuners (bits [0:5] low), then
	 * release reset and set RF control bits after 100 ms */
	writeb(0x00, ndev->bmmio0 + GPIO_REG_IO);
	msleep(100);
	gpio_reg =
		GPIO_FEA_RESET | GPIO_FEB_RESET |
		GPIO_FEA_TU_RESET | GPIO_FEB_TU_RESET |
		GPIO_RFA_CTL | GPIO_RFB_CTL;
	writeb(gpio_reg, ndev->bmmio0 + GPIO_REG_IO);
	dev_dbg(&ndev->pci_dev->dev,
		"%s(): AVL_PCIE_IENR 0x%x GPIO_REG_IO 0x%x\n",
		__func__, readl(ndev->bmmio0 + AVL_PCIE_IENR),
		(int)readb(ndev->bmmio0 + GPIO_REG_IO));
}

static void netup_unidvb_dma_enable(struct netup_dma *dma, int enable)
{
	u32 irq_mask = (dma->num == 0 ?
		NETUP_UNIDVB_IRQ_DMA1 : NETUP_UNIDVB_IRQ_DMA2);

	dev_dbg(&dma->ndev->pci_dev->dev,
		"%s(): DMA%d enable %d\n", __func__, dma->num, enable);
	if (enable) {
		writel(BIT_DMA_RUN, &dma->regs->ctrlstat_set);
		writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_SET);
	} else {
		writel(BIT_DMA_RUN, &dma->regs->ctrlstat_clear);
		writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_CLEAR);
	}
}

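/*
 * Reading of the interrupt path below: the hardware reports the current ring
 * head in curr_addr_hi/lo; the number of new bytes since the last interrupt
 * is the forward distance from addr_last, wrapping at ring_buffer_size.
 * For example (hypothetical numbers), with a 192512-byte ring, addr_last at
 * offset 190000 and the new head at offset 2000, size is
 * 192512 - (190000 - 2000) = 4512 bytes.
 */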
static irqreturn_t netup_dma_interrupt(struct netup_dma *dma)
{
	u64 addr_curr;
	u32 size;
	unsigned long flags;
	struct device *dev = &dma->ndev->pci_dev->dev;

	spin_lock_irqsave(&dma->lock, flags);
	addr_curr = ((u64)readl(&dma->regs->curr_addr_hi) << 32) |
		(u64)readl(&dma->regs->curr_addr_lo) | dma->high_addr;
	/* clear IRQ */
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	/* sanity check */
	if (addr_curr < dma->addr_phys ||
			addr_curr > dma->addr_phys + dma->ring_buffer_size) {
		if (addr_curr != 0) {
			dev_err(dev,
				"%s(): addr 0x%llx not from 0x%llx:0x%llx\n",
				__func__, addr_curr, (u64)dma->addr_phys,
				(u64)(dma->addr_phys + dma->ring_buffer_size));
		}
		goto irq_handled;
	}
	size = (addr_curr >= dma->addr_last) ?
		(u32)(addr_curr - dma->addr_last) :
		(u32)(dma->ring_buffer_size - (dma->addr_last - addr_curr));
	if (dma->data_size != 0) {
		printk_ratelimited("%s(): lost interrupt, data size %d\n",
			__func__, dma->data_size);
		dma->data_size += size;
	}
	if (dma->data_size == 0 || dma->data_size > dma->ring_buffer_size) {
		dma->data_size = size;
		dma->data_offset = (u32)(dma->addr_last - dma->addr_phys);
	}
	dma->addr_last = addr_curr;
	queue_work(dma->ndev->wq, &dma->work);
irq_handled:
	spin_unlock_irqrestore(&dma->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t netup_unidvb_isr(int irq, void *dev_id)
{
	struct pci_dev *pci_dev = (struct pci_dev *)dev_id;
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);
	u32 reg40, reg_isr;
	irqreturn_t iret = IRQ_NONE;

	/* disable interrupts */
	writel(0, ndev->bmmio0 + AVL_PCIE_IENR);
	/* check IRQ source */
	reg40 = readl(ndev->bmmio0 + AVL_PCIE_ISR);
	if ((reg40 & AVL_IRQ_ASSERTED) != 0) {
		/* IRQ is being signaled */
		reg_isr = readw(ndev->bmmio0 + REG_ISR);
		if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
			iret = netup_i2c_interrupt(&ndev->i2c[0]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
			iret = netup_i2c_interrupt(&ndev->i2c[1]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_SPI) {
			iret = netup_spi_interrupt(ndev->spi);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
			iret = netup_dma_interrupt(&ndev->dma[0]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
			iret = netup_dma_interrupt(&ndev->dma[1]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
			iret = netup_ci_interrupt(ndev);
		} else {
			dev_err(&pci_dev->dev,
				"%s(): unknown interrupt 0x%x\n",
				__func__, reg_isr);
		}
	}
	/* re-enable interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	return iret;
}

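/*
 * Buffer sizing note (arithmetic only, derived from the constants above):
 * each vb2 buffer carries one DMA block, NETUP_DMA_PACKETS_COUNT * 188 =
 * 24064 bytes, which PAGE_ALIGN() rounds up to 24576 bytes on 4 KiB pages.
 */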
static int netup_unidvb_queue_setup(struct vb2_queue *vq,
				    unsigned int *nbuffers,
				    unsigned int *nplanes,
				    unsigned int sizes[],
				    void *alloc_ctxs[])
{
	struct netup_dma *dma = vb2_get_drv_priv(vq);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);

	*nplanes = 1;
	if (vq->num_buffers + *nbuffers < VIDEO_MAX_FRAME)
		*nbuffers = VIDEO_MAX_FRAME - vq->num_buffers;
	sizes[0] = PAGE_ALIGN(NETUP_DMA_PACKETS_COUNT * 188);
	dev_dbg(&dma->ndev->pci_dev->dev, "%s() nbuffers=%d sizes[0]=%d\n",
		__func__, *nbuffers, sizes[0]);
	return 0;
}

static int netup_unidvb_buf_prepare(struct vb2_buffer *vb)
{
	struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct netup_unidvb_buffer *buf = container_of(vbuf,
				struct netup_unidvb_buffer, vb);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf);
	buf->size = 0;
	return 0;
}

static void netup_unidvb_buf_queue(struct vb2_buffer *vb)
{
	unsigned long flags;
	struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct netup_unidvb_buffer *buf = container_of(vbuf,
				struct netup_unidvb_buffer, vb);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf);
	spin_lock_irqsave(&dma->lock, flags);
	list_add_tail(&buf->list, &dma->free_buffers);
	spin_unlock_irqrestore(&dma->lock, flags);
	mod_timer(&dma->timeout, jiffies + msecs_to_jiffies(1000));
}

static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct netup_dma *dma = vb2_get_drv_priv(q);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_dma_enable(dma, 1);
	return 0;
}

static void netup_unidvb_stop_streaming(struct vb2_queue *q)
{
	struct netup_dma *dma = vb2_get_drv_priv(q);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_dma_enable(dma, 0);
	netup_unidvb_queue_cleanup(dma);
}

static const struct vb2_ops dvb_qops = {
	.queue_setup		= netup_unidvb_queue_setup,
	.buf_prepare		= netup_unidvb_buf_prepare,
	.buf_queue		= netup_unidvb_buf_queue,
	.start_streaming	= netup_unidvb_start_streaming,
	.stop_streaming		= netup_unidvb_stop_streaming,
};

static int netup_unidvb_queue_init(struct netup_dma *dma,
				   struct vb2_queue *vb_queue)
{
	int res;

	/* Init videobuf2 queue structure */
	vb_queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vb_queue->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	vb_queue->drv_priv = dma;
	vb_queue->buf_struct_size = sizeof(struct netup_unidvb_buffer);
	vb_queue->ops = &dvb_qops;
	vb_queue->mem_ops = &vb2_vmalloc_memops;
	vb_queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	res = vb2_queue_init(vb_queue);
	if (res != 0) {
		dev_err(&dma->ndev->pci_dev->dev,
			"%s(): vb2_queue_init failed (%d)\n", __func__, res);
	}
	return res;
}

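/*
 * Per-channel frontend layout, as set up below: each of the two DVB buses
 * carries three vb2_dvb frontends sharing one CXD2841ER demodulator:
 * fe0 is DVB-S/S2 (Horus3A tuner plus LNBH25 SEC), fe1 is DVB-T/T2 and
 * fe2 is DVB-C, both of the latter using the Ascot2E tuner.
 */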
static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev,
				 int num)
{
	struct vb2_dvb_frontend *fe0, *fe1, *fe2;

	if (num < 0 || num > 1) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to init DVB bus %d\n", __func__, num);
		return -ENODEV;
	}
	mutex_init(&ndev->frontends[num].lock);
	INIT_LIST_HEAD(&ndev->frontends[num].felist);
	if (vb2_dvb_alloc_frontend(&ndev->frontends[num], 1) == NULL ||
		vb2_dvb_alloc_frontend(
			&ndev->frontends[num], 2) == NULL ||
		vb2_dvb_alloc_frontend(
			&ndev->frontends[num], 3) == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to allocate vb2_dvb_frontend\n",
			__func__);
		return -ENOMEM;
	}
	fe0 = vb2_dvb_get_frontend(&ndev->frontends[num], 1);
	fe1 = vb2_dvb_get_frontend(&ndev->frontends[num], 2);
	fe2 = vb2_dvb_get_frontend(&ndev->frontends[num], 3);
	if (fe0 == NULL || fe1 == NULL || fe2 == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): frontends have not been allocated\n", __func__);
		return -EINVAL;
	}
	netup_unidvb_queue_init(&ndev->dma[num], &fe0->dvb.dvbq);
	netup_unidvb_queue_init(&ndev->dma[num], &fe1->dvb.dvbq);
	netup_unidvb_queue_init(&ndev->dma[num], &fe2->dvb.dvbq);
	fe0->dvb.name = "netup_fe0";
	fe1->dvb.name = "netup_fe1";
	fe2->dvb.name = "netup_fe2";
	fe0->dvb.frontend = dvb_attach(cxd2841er_attach_s,
		&demod_config, &ndev->i2c[num].adap);
	if (fe0->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-S/S2 frontend\n",
			__func__);
		goto frontend_detach;
	}
	horus3a_conf.set_tuner_priv = &ndev->dma[num];
	if (!dvb_attach(horus3a_attach, fe0->dvb.frontend,
			&horus3a_conf, &ndev->i2c[num].adap)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-S/S2 tuner frontend\n",
			__func__);
		goto frontend_detach;
	}
	if (!dvb_attach(lnbh25_attach, fe0->dvb.frontend,
			&lnbh25_conf, &ndev->i2c[num].adap)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach SEC frontend\n", __func__);
		goto frontend_detach;
	}
	/* DVB-T/T2 frontend */
	fe1->dvb.frontend = dvb_attach(cxd2841er_attach_t,
		&demod_config, &ndev->i2c[num].adap);
	if (fe1->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-T frontend\n", __func__);
		goto frontend_detach;
	}
	fe1->dvb.frontend->id = 1;
	ascot2e_conf.set_tuner_priv = &ndev->dma[num];
	if (!dvb_attach(ascot2e_attach, fe1->dvb.frontend,
			&ascot2e_conf, &ndev->i2c[num].adap)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-T tuner frontend\n",
			__func__);
		goto frontend_detach;
	}
	/* DVB-C/C2 frontend */
	fe2->dvb.frontend = dvb_attach(cxd2841er_attach_c,
				&demod_config, &ndev->i2c[num].adap);
	if (fe2->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-C frontend\n", __func__);
		goto frontend_detach;
	}
	fe2->dvb.frontend->id = 2;
	if (!dvb_attach(ascot2e_attach, fe2->dvb.frontend,
			&ascot2e_conf, &ndev->i2c[num].adap)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-T/C tuner frontend\n",
			__func__);
		goto frontend_detach;
	}

	if (vb2_dvb_register_bus(&ndev->frontends[num],
			THIS_MODULE, NULL,
			&ndev->pci_dev->dev, adapter_nr, 1)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to register DVB bus %d\n",
			__func__, num);
		goto frontend_detach;
	}
	dev_info(&ndev->pci_dev->dev, "DVB init done, num=%d\n", num);
	return 0;
frontend_detach:
	vb2_dvb_dealloc_frontends(&ndev->frontends[num]);
	return -EINVAL;
}

static void netup_unidvb_dvb_fini(struct netup_unidvb_dev *ndev, int num)
{
	if (num < 0 || num > 1) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): unable to unregister DVB bus %d\n",
			__func__, num);
		return;
	}
	vb2_dvb_unregister_bus(&ndev->frontends[num]);
	dev_info(&ndev->pci_dev->dev,
		"%s(): DVB bus %d unregistered\n", __func__, num);
}

static int netup_unidvb_dvb_setup(struct netup_unidvb_dev *ndev)
{
	int res;

	res = netup_unidvb_dvb_init(ndev, 0);
	if (res)
		return res;
	res = netup_unidvb_dvb_init(ndev, 1);
	if (res) {
		netup_unidvb_dvb_fini(ndev, 0);
		return res;
	}
	return 0;
}

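/*
 * Copy discipline below: data is drained from the hardware ring into the
 * current vb2 buffer in at most two memcpy_fromio() steps. Step one runs
 * from data_offset to the end of the ring when the pending span wraps;
 * step two continues from the ring start. For example (hypothetical
 * numbers), with a 192512-byte ring, data_offset = 190000 and
 * data_size = 4512, step one copies 2512 bytes and step two the
 * remaining 2000 from offset 0.
 */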
static int netup_unidvb_ring_copy(struct netup_dma *dma,
				  struct netup_unidvb_buffer *buf)
{
	u32 copy_bytes, ring_bytes;
	u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size;
	u8 *p = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
	struct netup_unidvb_dev *ndev = dma->ndev;

	if (p == NULL) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	p += buf->size;
	if (dma->data_offset + dma->data_size > dma->ring_buffer_size) {
		ring_bytes = dma->ring_buffer_size - dma->data_offset;
		copy_bytes = (ring_bytes > buff_bytes) ?
			buff_bytes : ring_bytes;
		memcpy_fromio(p, (u8 __iomem *)(dma->addr_virt + dma->data_offset),
			copy_bytes);
		p += copy_bytes;
		buf->size += copy_bytes;
		buff_bytes -= copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	if (buff_bytes > 0) {
		ring_bytes = dma->data_size;
		copy_bytes = (ring_bytes > buff_bytes) ?
				buff_bytes : ring_bytes;
		memcpy_fromio(p, (u8 __iomem *)(dma->addr_virt + dma->data_offset),
			copy_bytes);
		buf->size += copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	return 0;
}

static void netup_unidvb_dma_worker(struct work_struct *work)
{
	struct netup_dma *dma = container_of(work, struct netup_dma, work);
	struct netup_unidvb_dev *ndev = dma->ndev;
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	if (dma->data_size == 0) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): data_size == 0\n", __func__);
		goto work_done;
	}
	while (dma->data_size > 0) {
		if (list_empty(&dma->free_buffers)) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): no free buffers\n", __func__);
			goto work_done;
		}
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		if (buf->size >= NETUP_DMA_PACKETS_COUNT * 188) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer overflow, size %d\n",
				__func__, buf->size);
			goto work_done;
		}
		if (netup_unidvb_ring_copy(dma, buf))
			goto work_done;
		if (buf->size == NETUP_DMA_PACKETS_COUNT * 188) {
			list_del(&buf->list);
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer %p done, size %d\n",
				__func__, buf, buf->size);
			buf->vb.vb2_buf.timestamp = ktime_get_ns();
			vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		}
	}
work_done:
	dma->data_size = 0;
	spin_unlock_irqrestore(&dma->lock, flags);
}

static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
{
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	while (!list_empty(&dma->free_buffers)) {
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dma->lock, flags);
}

static void netup_unidvb_dma_timeout(unsigned long data)
{
	struct netup_dma *dma = (struct netup_dma *)data;
	struct netup_unidvb_dev *ndev = dma->ndev;

	dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_queue_cleanup(dma);
}

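/*
 * Address programming note (inferred from the code below and from
 * netup_dma_interrupt() above, so treat as a best-effort reading): only the
 * low 30 bits of the ring address go into start_addr_lo; the two top bits
 * (addr_phys & 0xC0000000) are stored in dma->high_addr, written to the
 * bridge window register at offset 0x1000, and OR-ed back into the head
 * address when an interrupt is handled.
 */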
static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;
	struct device *dev = &ndev->pci_dev->dev;

	if (num < 0 || num > 1) {
		dev_err(dev, "%s(): unable to register DMA%d\n",
			__func__, num);
		return -ENODEV;
	}
	dma = &ndev->dma[num];
	dev_info(dev, "%s(): starting DMA%d\n", __func__, num);
	dma->num = num;
	dma->ndev = ndev;
	spin_lock_init(&dma->lock);
	INIT_WORK(&dma->work, netup_unidvb_dma_worker);
	INIT_LIST_HEAD(&dma->free_buffers);
	setup_timer(&dma->timeout, netup_unidvb_dma_timeout,
		    (unsigned long)dma);
	dma->ring_buffer_size = ndev->dma_size / 2;
	dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num;
	dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys +
		dma->ring_buffer_size * num);
	dev_info(dev, "%s(): DMA%d buffer virt/phys 0x%p/0x%llx size %d\n",
		__func__, num, dma->addr_virt,
		(unsigned long long)dma->addr_phys,
		dma->ring_buffer_size);
	memset_io((u8 __iomem *)dma->addr_virt, 0, dma->ring_buffer_size);
	dma->addr_last = dma->addr_phys;
	dma->high_addr = (u32)(dma->addr_phys & 0xC0000000);
	dma->regs = (struct netup_dma_regs __iomem *)(num == 0 ?
		ndev->bmmio0 + NETUP_DMA0_ADDR :
		ndev->bmmio0 + NETUP_DMA1_ADDR);
	writel((NETUP_DMA_BLOCKS_COUNT << 24) |
		(NETUP_DMA_PACKETS_COUNT << 8) | 188, &dma->regs->size);
	writel((u32)(dma->addr_phys & 0x3FFFFFFF), &dma->regs->start_addr_lo);
	writel(0, &dma->regs->start_addr_hi);
	writel(dma->high_addr, ndev->bmmio0 + 0x1000);
	writel(375000000, &dma->regs->timeout);
	msleep(1000);
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	return 0;
}

static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;

	if (num < 0 || num > 1)
		return;
	dev_dbg(&ndev->pci_dev->dev, "%s(): num %d\n", __func__, num);
	dma = &ndev->dma[num];
	netup_unidvb_dma_enable(dma, 0);
	msleep(50);
	cancel_work_sync(&dma->work);
	del_timer(&dma->timeout);
}

static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
{
	int res;

	res = netup_unidvb_dma_init(ndev, 0);
	if (res)
		return res;
	res = netup_unidvb_dma_init(ndev, 1);
	if (res) {
		netup_unidvb_dma_fini(ndev, 0);
		return res;
	}
	netup_unidvb_dma_enable(&ndev->dma[0], 0);
	netup_unidvb_dma_enable(&ndev->dma[1], 0);
	return 0;
}

static int netup_unidvb_ci_setup(struct netup_unidvb_dev *ndev,
				 struct pci_dev *pci_dev)
{
	int res;

	writew(NETUP_UNIDVB_IRQ_CI, ndev->bmmio0 + REG_IMASK_SET);
	res = netup_unidvb_ci_register(ndev, 0, pci_dev);
	if (res)
		return res;
	res = netup_unidvb_ci_register(ndev, 1, pci_dev);
	if (res)
		netup_unidvb_ci_unregister(ndev, 0);
	return res;
}

static int netup_unidvb_request_mmio(struct pci_dev *pci_dev)
{
	if (!request_mem_region(pci_resource_start(pci_dev, 0),
			pci_resource_len(pci_dev, 0), NETUP_UNIDVB_NAME)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO bar 0 at 0x%llx\n",
			__func__,
			(unsigned long long)pci_resource_start(pci_dev, 0));
		return -EBUSY;
	}
	if (!request_mem_region(pci_resource_start(pci_dev, 1),
			pci_resource_len(pci_dev, 1), NETUP_UNIDVB_NAME)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO bar 1 at 0x%llx\n",
			__func__,
			(unsigned long long)pci_resource_start(pci_dev, 1));
		release_mem_region(pci_resource_start(pci_dev, 0),
			pci_resource_len(pci_dev, 0));
		return -EBUSY;
	}
	return 0;
}

static int netup_unidvb_request_modules(struct device *dev)
{
	static const char * const modules[] = {
		"lnbh25", "ascot2e", "horus3a", "cxd2841er", NULL
	};
	const char * const *curr_mod = modules;
	int err;

	while (*curr_mod != NULL) {
		err = request_module(*curr_mod);
		if (err) {
			dev_warn(dev, "request_module(%s) failed: %d\n",
				*curr_mod, err);
		}
		++curr_mod;
	}
	return 0;
}

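/*
 * Probe order implemented below: request helper modules, check the card
 * revision (old firmware only gets SPI access, for updating), allocate the
 * device context and workqueue, enable the PCI device and 32-bit DMA, map
 * both MMIO BARs, hook the shared IRQ, allocate the coherent DMA ring, then
 * bring up SPI, I2C, the two DVB buses, CI and finally the DMA channels.
 * The error labels at the bottom unwind these steps in reverse.
 */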
static int netup_unidvb_initdev(struct pci_dev *pci_dev,
				const struct pci_device_id *pci_id)
{
	u8 board_revision;
	u16 board_vendor;
	struct netup_unidvb_dev *ndev;
	int old_firmware = 0;

	netup_unidvb_request_modules(&pci_dev->dev);

	/* Check card revision */
	if (pci_dev->revision != NETUP_PCI_DEV_REVISION) {
		dev_err(&pci_dev->dev,
			"netup_unidvb: expected card revision %d, got %d\n",
			NETUP_PCI_DEV_REVISION, pci_dev->revision);
		dev_err(&pci_dev->dev,
			"Please upgrade firmware!\n");
		dev_err(&pci_dev->dev,
			"Instructions on http://www.netup.tv\n");
		old_firmware = 1;
		spi_enable = 1;
	}

	/* allocate device context */
	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto dev_alloc_err;
	ndev->old_fw = old_firmware;
	ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
	if (!ndev->wq) {
		dev_err(&pci_dev->dev,
			"%s(): unable to create workqueue\n", __func__);
		goto wq_create_err;
	}
	ndev->pci_dev = pci_dev;
	ndev->pci_bus = pci_dev->bus->number;
	ndev->pci_slot = PCI_SLOT(pci_dev->devfn);
	ndev->pci_func = PCI_FUNC(pci_dev->devfn);
	ndev->board_num = ndev->pci_bus * 10 + ndev->pci_slot;
	pci_set_drvdata(pci_dev, ndev);
	/* PCI init */
	dev_info(&pci_dev->dev, "%s(): PCI device (%d). Bus:0x%x Slot:0x%x\n",
		__func__, ndev->board_num, ndev->pci_bus, ndev->pci_slot);

	if (pci_enable_device(pci_dev)) {
		dev_err(&pci_dev->dev, "%s(): pci_enable_device failed\n",
			__func__);
		goto pci_enable_err;
	}
	/* read PCI info */
	pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &board_revision);
	pci_read_config_word(pci_dev, PCI_VENDOR_ID, &board_vendor);
	if (board_vendor != NETUP_VENDOR_ID) {
		dev_err(&pci_dev->dev, "%s(): unknown board vendor 0x%x",
			__func__, board_vendor);
		goto pci_detect_err;
	}
	dev_info(&pci_dev->dev,
		"%s(): board vendor 0x%x, revision 0x%x\n",
		__func__, board_vendor, board_revision);
	pci_set_master(pci_dev);
	if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) {
		dev_err(&pci_dev->dev,
			"%s(): 32bit PCI DMA is not supported\n", __func__);
		goto pci_detect_err;
	}
	dev_info(&pci_dev->dev, "%s(): using 32bit PCI DMA\n", __func__);
	/* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
	pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL,
		PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
		PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
	/* Adjust PCIe completion timeout. */
	pcie_capability_clear_and_set_word(pci_dev,
		PCI_EXP_DEVCTL2, 0xf, 0x2);

	if (netup_unidvb_request_mmio(pci_dev)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO regions\n", __func__);
		goto pci_detect_err;
	}
	ndev->lmmio0 = ioremap(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	if (!ndev->lmmio0) {
		dev_err(&pci_dev->dev,
			"%s(): unable to remap MMIO bar 0\n", __func__);
		goto pci_bar0_error;
	}
	ndev->lmmio1 = ioremap(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	if (!ndev->lmmio1) {
		dev_err(&pci_dev->dev,
			"%s(): unable to remap MMIO bar 1\n", __func__);
		goto pci_bar1_error;
	}
	ndev->bmmio0 = (u8 __iomem *)ndev->lmmio0;
	ndev->bmmio1 = (u8 __iomem *)ndev->lmmio1;
	dev_info(&pci_dev->dev,
		"%s(): PCI MMIO at 0x%p (%d); 0x%p (%d); IRQ %d",
		__func__,
		ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
		ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
		pci_dev->irq);
	if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
			"netup_unidvb", pci_dev) < 0) {
		dev_err(&pci_dev->dev,
			"%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
		goto irq_request_err;
	}
	ndev->dma_size = 2 * 188 *
		NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
	ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
		ndev->dma_size, &ndev->dma_phys, GFP_KERNEL);
	if (!ndev->dma_virt) {
		dev_err(&pci_dev->dev, "%s(): unable to allocate DMA buffer\n",
			__func__);
		goto dma_alloc_err;
	}
	netup_unidvb_dev_enable(ndev);
	if (spi_enable && netup_spi_init(ndev)) {
		dev_warn(&pci_dev->dev,
			"netup_unidvb: SPI flash setup failed\n");
		goto spi_setup_err;
	}
	if (old_firmware) {
		dev_err(&pci_dev->dev,
			"netup_unidvb: card initialization was incomplete\n");
		return 0;
	}
	if (netup_i2c_register(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: I2C setup failed\n");
		goto i2c_setup_err;
	}
	/* enable I2C IRQs */
	writew(NETUP_UNIDVB_IRQ_I2C0 | NETUP_UNIDVB_IRQ_I2C1,
		ndev->bmmio0 + REG_IMASK_SET);
	usleep_range(5000, 10000);
	if (netup_unidvb_dvb_setup(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: DVB setup failed\n");
		goto dvb_setup_err;
	}
	if (netup_unidvb_ci_setup(ndev, pci_dev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: CI setup failed\n");
		goto ci_setup_err;
	}
	if (netup_unidvb_dma_setup(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
		goto dma_setup_err;
	}
	dev_info(&pci_dev->dev,
		"netup_unidvb: device has been initialized\n");
	return 0;
dma_setup_err:
	netup_unidvb_ci_unregister(ndev, 0);
	netup_unidvb_ci_unregister(ndev, 1);
ci_setup_err:
	netup_unidvb_dvb_fini(ndev, 0);
	netup_unidvb_dvb_fini(ndev, 1);
dvb_setup_err:
	netup_i2c_unregister(ndev);
i2c_setup_err:
	if (ndev->spi)
		netup_spi_release(ndev);
spi_setup_err:
	dma_free_coherent(&pci_dev->dev, ndev->dma_size,
			ndev->dma_virt, ndev->dma_phys);
dma_alloc_err:
	free_irq(pci_dev->irq, pci_dev);
irq_request_err:
	iounmap(ndev->lmmio1);
pci_bar1_error:
	iounmap(ndev->lmmio0);
pci_bar0_error:
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
pci_detect_err:
	pci_disable_device(pci_dev);
pci_enable_err:
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
wq_create_err:
	kfree(ndev);
dev_alloc_err:
	dev_err(&pci_dev->dev,
		"%s(): failed to initialize device\n", __func__);
	return -EIO;
}

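/*
 * Teardown mirrors the tail of netup_unidvb_initdev(): for cards that were
 * fully initialized (current firmware), DMA, CI, DVB and I2C are unwound
 * first; SPI, interrupt masking, the DMA ring, IRQ, MMIO and the PCI device
 * are released in all cases.
 */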
static void netup_unidvb_finidev(struct pci_dev *pci_dev)
{
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);

	dev_info(&pci_dev->dev, "%s(): trying to stop device\n", __func__);
	if (!ndev->old_fw) {
		netup_unidvb_dma_fini(ndev, 0);
		netup_unidvb_dma_fini(ndev, 1);
		netup_unidvb_ci_unregister(ndev, 0);
		netup_unidvb_ci_unregister(ndev, 1);
		netup_unidvb_dvb_fini(ndev, 0);
		netup_unidvb_dvb_fini(ndev, 1);
		netup_i2c_unregister(ndev);
	}
	if (ndev->spi)
		netup_spi_release(ndev);
	writew(0xffff, ndev->bmmio0 + REG_IMASK_CLEAR);
	dma_free_coherent(&ndev->pci_dev->dev, ndev->dma_size,
			ndev->dma_virt, ndev->dma_phys);
	free_irq(pci_dev->irq, pci_dev);
	iounmap(ndev->lmmio0);
	iounmap(ndev->lmmio1);
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	pci_disable_device(pci_dev);
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
	kfree(ndev);
	dev_info(&pci_dev->dev,
		"%s(): device has been successfully stopped\n", __func__);
}

static const struct pci_device_id netup_unidvb_pci_tbl[] = {
	{ PCI_DEVICE(0x1b55, 0x18f6) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, netup_unidvb_pci_tbl);

static struct pci_driver netup_unidvb_pci_driver = {
	.name     = "netup_unidvb",
	.id_table = netup_unidvb_pci_tbl,
	.probe    = netup_unidvb_initdev,
	.remove   = netup_unidvb_finidev,
	.suspend  = NULL,
	.resume   = NULL,
};

static int __init netup_unidvb_init(void)
{
	return pci_register_driver(&netup_unidvb_pci_driver);
}

static void __exit netup_unidvb_fini(void)
{
	pci_unregister_driver(&netup_unidvb_pci_driver);
}

module_init(netup_unidvb_init);
module_exit(netup_unidvb_fini);