/*
    comedi/drivers/mite.c
    Hardware driver for NI Mite PCI interface chip

    COMEDI - Linux Control and Measurement Device Interface
    Copyright (C) 1997-2002 David A. Schleef <ds@schleef.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*/

/*
        The PCI-MIO E series driver was originally written by
        Tomasz Motylewski <...>, and ported to comedi by ds.

        References for specifications:

           321747b.pdf  Register Level Programmer Manual (obsolete)
           321747c.pdf  Register Level Programmer Manual (new)
           DAQ-STC reference manual

        Other possibly relevant info:

           320517c.pdf  User manual (obsolete)
           320517f.pdf  User manual (new)
           320889a.pdf  delete
           320906c.pdf  maximum signal ratings
           321066a.pdf  about 16x
           321791a.pdf  discontinuation of at-mio-16e-10 rev. c
           321808a.pdf  about at-mio-16e-10 rev P
           321837a.pdf  discontinuation of at-mio-16de-10 rev d
           321838a.pdf  about at-mio-16de-10 rev N

        ISSUES:

*/

/* #define USE_KMALLOC */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/pci.h>

#include "../comedidev.h"

#include "comedi_fc.h"
#include "mite.h"

#define TOP_OF_PAGE(x) ((x)|(~(PAGE_MASK)))
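/*
 * TOP_OF_PAGE(x) evaluates to the address of the last byte of the page that
 * contains x.  For example, with a 4 KiB PAGE_SIZE, ~PAGE_MASK is 0xfff, so
 * TOP_OF_PAGE(0x1234) == 0x1234 | 0xfff == 0x1fff.
 */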

struct mite_struct *mite_alloc(struct pci_dev *pcidev)
{
        struct mite_struct *mite;
        unsigned int i;

        mite = kzalloc(sizeof(*mite), GFP_KERNEL);
        if (mite) {
                spin_lock_init(&mite->lock);
                mite->pcidev = pcidev;
                for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
                        mite->channels[i].mite = mite;
                        mite->channels[i].channel = i;
                        mite->channels[i].done = 1;
                }
        }
        return mite;
}
EXPORT_SYMBOL_GPL(mite_alloc);
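/*
 * Illustrative lifetime of the mite object (a sketch only; devpriv and the
 * surrounding callbacks are hypothetical names, not from a particular board
 * driver):
 *
 *      devpriv->mite = mite_alloc(pcidev);
 *      if (!devpriv->mite)
 *              return -ENOMEM;
 *      ret = mite_setup2(dev, devpriv->mite, false);
 *      if (ret < 0)
 *              return ret;
 *      ...
 *      mite_detach(devpriv->mite);
 */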

static void dump_chip_signature(u32 csigr_bits)
{
        pr_info("version = %i, type = %i, mite mode = %i, interface mode = %i\n",
                mite_csigr_version(csigr_bits), mite_csigr_type(csigr_bits),
                mite_csigr_mmode(csigr_bits), mite_csigr_imode(csigr_bits));
        pr_info("num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
                mite_csigr_dmac(csigr_bits), mite_csigr_wpdep(csigr_bits),
                mite_csigr_wins(csigr_bits), mite_csigr_iowins(csigr_bits));
}

static unsigned mite_fifo_size(struct mite_struct *mite, unsigned channel)
{
        unsigned fcr_bits = readl(mite->mite_io_addr + MITE_FCR(channel));
        unsigned empty_count = (fcr_bits >> 16) & 0xff;
        unsigned full_count = fcr_bits & 0xff;

        return empty_count + full_count;
}

int mite_setup2(struct comedi_device *dev,
                struct mite_struct *mite, bool use_win1)
{
        unsigned long length;
        int i;
        u32 csigr_bits;
        unsigned unknown_dma_burst_bits;

        pci_set_master(mite->pcidev);

        mite->mite_io_addr = pci_ioremap_bar(mite->pcidev, 0);
        if (!mite->mite_io_addr) {
                dev_err(dev->class_dev,
                        "Failed to remap mite io memory address\n");
                return -ENOMEM;
        }
        mite->mite_phys_addr = pci_resource_start(mite->pcidev, 0);

        dev->mmio = pci_ioremap_bar(mite->pcidev, 1);
        if (!dev->mmio) {
                dev_err(dev->class_dev,
                        "Failed to remap daq io memory address\n");
                return -ENOMEM;
        }
        mite->daq_phys_addr = pci_resource_start(mite->pcidev, 1);
        length = pci_resource_len(mite->pcidev, 1);

        if (use_win1) {
                writel(0, mite->mite_io_addr + MITE_IODWBSR);
                dev_info(dev->class_dev,
                         "using I/O Window Base Size register 1\n");
                writel(mite->daq_phys_addr | WENAB |
                       MITE_IODWBSR_1_WSIZE_bits(length),
                       mite->mite_io_addr + MITE_IODWBSR_1);
                writel(0, mite->mite_io_addr + MITE_IODWCR_1);
        } else {
                writel(mite->daq_phys_addr | WENAB,
                       mite->mite_io_addr + MITE_IODWBSR);
        }
        /*
         * make sure dma bursts work. I got this from running a bus analyzer
         * on a pxi-6281 and a pxi-6713. 6713 powered up with register value
         * of 0x61f and bursts worked. 6281 powered up with register value of
         * 0x1f and bursts didn't work. The NI windows driver reads the
         * register, then does a bitwise-or of 0x600 with it and writes it back.
         */
        unknown_dma_burst_bits =
            readl(mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
        unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
        writel(unknown_dma_burst_bits,
               mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);

        csigr_bits = readl(mite->mite_io_addr + MITE_CSIGR);
        mite->num_channels = mite_csigr_dmac(csigr_bits);
        if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
                dev_warn(dev->class_dev,
                         "mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
                         mite->num_channels, MAX_MITE_DMA_CHANNELS);
                mite->num_channels = MAX_MITE_DMA_CHANNELS;
        }
        dump_chip_signature(csigr_bits);
        for (i = 0; i < mite->num_channels; i++) {
                writel(CHOR_DMARESET, mite->mite_io_addr + MITE_CHOR(i));
                /* disable interrupts */
                writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
                       CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
                       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
                       mite->mite_io_addr + MITE_CHCR(i));
        }
        mite->fifo_size = mite_fifo_size(mite, 0);
        dev_info(dev->class_dev, "fifo size is %i.\n", mite->fifo_size);
        return 0;
}
EXPORT_SYMBOL_GPL(mite_setup2);
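/*
 * Note on use_win1 (as set up above): when true, the data-acquisition BAR is
 * exposed through MITE I/O window 1, with the window size programmed from the
 * BAR length, and window 0 is cleared; when false, window 0 maps the BAR
 * directly.  At the time of writing, mite.h also wraps the common case as
 * mite_setup(), which just calls mite_setup2() with use_win1 set to false.
 */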

void mite_detach(struct mite_struct *mite)
{
        if (!mite)
                return;

        if (mite->mite_io_addr)
                iounmap(mite->mite_io_addr);

        kfree(mite);
}
EXPORT_SYMBOL_GPL(mite_detach);

struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite)
{
        struct mite_dma_descriptor_ring *ring =
            kmalloc(sizeof(*ring), GFP_KERNEL);

        if (!ring)
                return NULL;
        ring->hw_dev = get_device(&mite->pcidev->dev);
        if (!ring->hw_dev) {
                kfree(ring);
                return NULL;
        }
        ring->n_links = 0;
        ring->descriptors = NULL;
        ring->descriptors_dma_addr = 0;
        return ring;
}
EXPORT_SYMBOL_GPL(mite_alloc_ring);
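/*
 * Sketch of the usual descriptor-ring lifetime (devpriv is a hypothetical
 * private-data pointer):
 *
 *      devpriv->ring = mite_alloc_ring(devpriv->mite);
 *      ...
 *      ret = mite_buf_change(devpriv->ring, s);
 *      ...
 *      mite_free_ring(devpriv->ring);
 *
 * mite_alloc_ring() only takes a reference on the PCI device and zeroes the
 * bookkeeping; the descriptors themselves are allocated by mite_buf_change()
 * once the comedi buffer exists.
 */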

void mite_free_ring(struct mite_dma_descriptor_ring *ring)
{
        if (ring) {
                if (ring->descriptors) {
                        dma_free_coherent(ring->hw_dev,
                                          ring->n_links *
                                          sizeof(struct mite_dma_descriptor),
                                          ring->descriptors,
                                          ring->descriptors_dma_addr);
                }
                put_device(ring->hw_dev);
                kfree(ring);
        }
}
EXPORT_SYMBOL_GPL(mite_free_ring);

struct mite_channel *
mite_request_channel_in_range(struct mite_struct *mite,
                              struct mite_dma_descriptor_ring *ring,
                              unsigned min_channel, unsigned max_channel)
{
        int i;
        unsigned long flags;
        struct mite_channel *channel = NULL;

        /*
         * spin lock so mite_release_channel can be called safely
         * from interrupts
         */
        spin_lock_irqsave(&mite->lock, flags);
        for (i = min_channel; i <= max_channel; ++i) {
                if (mite->channel_allocated[i] == 0) {
                        mite->channel_allocated[i] = 1;
                        channel = &mite->channels[i];
                        channel->ring = ring;
                        break;
                }
        }
        spin_unlock_irqrestore(&mite->lock, flags);
        return channel;
}
EXPORT_SYMBOL_GPL(mite_request_channel_in_range);
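/*
 * Illustrative use (hypothetical caller): grab any free channel for a ring
 * and hand it back when the command finishes.  mite.h also provides a
 * mite_request_channel() convenience that searches the whole channel range.
 *
 *      mite_chan = mite_request_channel_in_range(mite, ring, 0,
 *                                                mite->num_channels - 1);
 *      if (!mite_chan)
 *              return -EBUSY;
 *      ...
 *      mite_release_channel(mite_chan);
 */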

void mite_release_channel(struct mite_channel *mite_chan)
{
        struct mite_struct *mite = mite_chan->mite;
        unsigned long flags;

        /* spin lock to prevent races with mite_request_channel */
        spin_lock_irqsave(&mite->lock, flags);
        if (mite->channel_allocated[mite_chan->channel]) {
                mite_dma_disarm(mite_chan);
                mite_dma_reset(mite_chan);
                /*
                 * disable all channel's interrupts (do it after disarm/reset
                 * so MITE_CHCR reg isn't changed while dma is still active!)
                 */
                writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
                       CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
                       CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
                       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
                       mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
                mite->channel_allocated[mite_chan->channel] = 0;
                mite_chan->ring = NULL;
                mmiowb();
        }
        spin_unlock_irqrestore(&mite->lock, flags);
}
EXPORT_SYMBOL_GPL(mite_release_channel);

void mite_dma_arm(struct mite_channel *mite_chan)
{
        struct mite_struct *mite = mite_chan->mite;
        int chor;
        unsigned long flags;

        /*
         * memory barrier is intended to ensure any twiddling with the buffer
         * is done before writing to the mite to arm dma transfer
         */
        smp_mb();
        /* arm */
        chor = CHOR_START;
        spin_lock_irqsave(&mite->lock, flags);
        mite_chan->done = 0;
        writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
        mmiowb();
        spin_unlock_irqrestore(&mite->lock, flags);
/*      mite_dma_tcr(mite, channel); */
}
EXPORT_SYMBOL_GPL(mite_dma_arm);

/**************************************/

int mite_buf_change(struct mite_dma_descriptor_ring *ring,
                    struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        unsigned int n_links;
        int i;

        if (ring->descriptors) {
                dma_free_coherent(ring->hw_dev,
                                  ring->n_links *
                                  sizeof(struct mite_dma_descriptor),
                                  ring->descriptors,
                                  ring->descriptors_dma_addr);
        }
        ring->descriptors = NULL;
        ring->descriptors_dma_addr = 0;
        ring->n_links = 0;

        if (async->prealloc_bufsz == 0)
                return 0;

        n_links = async->prealloc_bufsz >> PAGE_SHIFT;

        ring->descriptors =
            dma_alloc_coherent(ring->hw_dev,
                               n_links * sizeof(struct mite_dma_descriptor),
                               &ring->descriptors_dma_addr, GFP_KERNEL);
        if (!ring->descriptors) {
                dev_err(s->device->class_dev,
                        "mite: ring buffer allocation failed\n");
                return -ENOMEM;
        }
        ring->n_links = n_links;

        for (i = 0; i < n_links; i++) {
                ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
                ring->descriptors[i].addr =
                    cpu_to_le32(async->buf_map->page_list[i].dma_addr);
                ring->descriptors[i].next =
                    cpu_to_le32(ring->descriptors_dma_addr +
                                (i + 1) * sizeof(struct mite_dma_descriptor));
        }
        ring->descriptors[n_links - 1].next =
            cpu_to_le32(ring->descriptors_dma_addr);
        /*
         * barrier is meant to ensure that all the writes to the dma
         * descriptors have completed before the dma controller is commanded
         * to read them
         */
        smp_wmb();
        return 0;
}
EXPORT_SYMBOL_GPL(mite_buf_change);
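/*
 * Worked example of the ring geometry built above: with PAGE_SIZE == 4096 and
 * a 64 KiB preallocated buffer, n_links = 65536 >> 12 = 16.  Each descriptor
 * covers one page of the comedi buffer, and the last descriptor's "next"
 * pointer is redirected back to the first, so the hardware cycles through the
 * buffer as an endless circular DMA chain.
 */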

void mite_prep_dma(struct mite_channel *mite_chan,
                   unsigned int num_device_bits, unsigned int num_memory_bits)
{
        unsigned int chor, chcr, mcr, dcr, lkcr;
        struct mite_struct *mite = mite_chan->mite;

        /* reset DMA and FIFO */
        chor = CHOR_DMARESET | CHOR_FRESET;
        writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));

        /* short link chaining mode */
        chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE |
            CHCR_BURSTEN;
        /*
         * Link Complete Interrupt: interrupt every time a link
         * in MITE_RING is completed. This can generate a lot of
         * extra interrupts, but right now we update the values
         * of buf_int_ptr and buf_int_count at each interrupt. A
         * better method is to poll the MITE before each user
         * "read()" to calculate the number of bytes available.
         */
        chcr |= CHCR_SET_LC_IE;
        if (num_memory_bits == 32 && num_device_bits == 16) {
                /*
                 * Doing a combined 32 and 16 bit byteswap gets the 16 bit
                 * samples into the fifo in the right order. Tested doing 32 bit
                 * memory to 16 bit device transfers to the analog out of a
                 * pxi-6281, which has mite version = 1, type = 4. This also
                 * works for dma reads from the counters on e-series boards.
                 */
                chcr |= CHCR_BYTE_SWAP_DEVICE | CHCR_BYTE_SWAP_MEMORY;
        }
        if (mite_chan->dir == COMEDI_INPUT)
                chcr |= CHCR_DEV_TO_MEM;

        writel(chcr, mite->mite_io_addr + MITE_CHCR(mite_chan->channel));

        /* to/from memory */
        mcr = CR_RL(64) | CR_ASEQUP;
        switch (num_memory_bits) {
        case 8:
                mcr |= CR_PSIZE8;
                break;
        case 16:
                mcr |= CR_PSIZE16;
                break;
        case 32:
                mcr |= CR_PSIZE32;
                break;
        default:
                pr_warn("bug! invalid mem bit width for dma transfer\n");
                break;
        }
        writel(mcr, mite->mite_io_addr + MITE_MCR(mite_chan->channel));

        /* from/to device */
        dcr = CR_RL(64) | CR_ASEQUP;
        dcr |= CR_PORTIO | CR_AMDEVICE | CR_REQSDRQ(mite_chan->channel);
        switch (num_device_bits) {
        case 8:
                dcr |= CR_PSIZE8;
                break;
        case 16:
                dcr |= CR_PSIZE16;
                break;
        case 32:
                dcr |= CR_PSIZE32;
                break;
        default:
                pr_warn("bug! invalid dev bit width for dma transfer\n");
                break;
        }
        writel(dcr, mite->mite_io_addr + MITE_DCR(mite_chan->channel));

        /* reset the DAR */
        writel(0, mite->mite_io_addr + MITE_DAR(mite_chan->channel));

        /* the link is 32bits */
        lkcr = CR_RL(64) | CR_ASEQUP | CR_PSIZE32;
        writel(lkcr, mite->mite_io_addr + MITE_LKCR(mite_chan->channel));

        /* starting address for link chaining */
        writel(mite_chan->ring->descriptors_dma_addr,
               mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_prep_dma);
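/*
 * Typical start-of-command sequence in a board driver (a sketch; the bit
 * widths depend on the hardware):
 *
 *      mite_chan->dir = COMEDI_INPUT;
 *      mite_prep_dma(mite_chan, 16, 32);       (16-bit device, 32-bit memory)
 *      mite_dma_arm(mite_chan);
 *
 * On completion or cancel the driver disarms the channel with
 * mite_dma_disarm() and resets it before releasing it.
 */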

static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
{
        struct mite_struct *mite = mite_chan->mite;

        return readl(mite->mite_io_addr + MITE_DAR(mite_chan->channel));
}

u32 mite_bytes_in_transit(struct mite_channel *mite_chan)
{
        struct mite_struct *mite = mite_chan->mite;

        return readl(mite->mite_io_addr +
                     MITE_FCR(mite_chan->channel)) & 0x000000FF;
}
EXPORT_SYMBOL_GPL(mite_bytes_in_transit);

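/*
 * The four helpers below all combine the device byte count with whatever is
 * still sitting in the channel FIFO; whether the result is a lower or an
 * upper bound comes from the order of the two register reads, since both
 * values can advance between them.  For example, for input (device to
 * memory): if the DAR reads 1000 and the FIFO then reports 24 bytes in
 * transit, at least 1000 - 24 = 976 bytes have actually reached memory.
 */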
/* returns lower bound for number of bytes transferred from device to memory */
u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
{
        u32 device_byte_count;

        device_byte_count = mite_device_bytes_transferred(mite_chan);
        return device_byte_count - mite_bytes_in_transit(mite_chan);
}
EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_lb);

/* returns upper bound for number of bytes transferred from device to memory */
u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
{
        u32 in_transit_count;

        in_transit_count = mite_bytes_in_transit(mite_chan);
        return mite_device_bytes_transferred(mite_chan) - in_transit_count;
}
EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_ub);

/* returns lower bound for number of bytes read from memory to device */
u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
{
        u32 device_byte_count;

        device_byte_count = mite_device_bytes_transferred(mite_chan);
        return device_byte_count + mite_bytes_in_transit(mite_chan);
}
EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_lb);

/* returns upper bound for number of bytes read from memory to device */
u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
{
        u32 in_transit_count;

        in_transit_count = mite_bytes_in_transit(mite_chan);
        return mite_device_bytes_transferred(mite_chan) + in_transit_count;
}
EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_ub);

unsigned mite_dma_tcr(struct mite_channel *mite_chan)
{
        struct mite_struct *mite = mite_chan->mite;
        int lkar;

        /* note: the LKAR value read here is unused; only TCR is returned */
        lkar = readl(mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
        return readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_dma_tcr);

void mite_dma_disarm(struct mite_channel *mite_chan)
{
        struct mite_struct *mite = mite_chan->mite;
        unsigned chor;

        /* disarm */
        chor = CHOR_ABORT;
        writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_dma_disarm);

int mite_sync_input_dma(struct mite_channel *mite_chan,
                        struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        int count;
        unsigned int nbytes, old_alloc_count;

        old_alloc_count = async->buf_write_alloc_count;
        /* write alloc as much as we can */
        comedi_buf_write_alloc(s, async->prealloc_bufsz);

        nbytes = mite_bytes_written_to_memory_lb(mite_chan);
        if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
                  old_alloc_count) > 0) {
                dev_warn(s->device->class_dev,
                         "mite: DMA overwrite of free area\n");
                async->events |= COMEDI_CB_OVERFLOW;
                return -1;
        }

        count = nbytes - async->buf_write_count;
        /*
         * it's possible count will be negative due to the conservative value
         * returned by mite_bytes_written_to_memory_lb
         */
        if (count <= 0)
                return 0;

        comedi_buf_write_free(s, count);
        comedi_inc_scan_progress(s, count);
        async->events |= COMEDI_CB_BLOCK;
        return 0;
}
EXPORT_SYMBOL_GPL(mite_sync_input_dma);
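/*
 * Illustrative interrupt-handler fragment (hypothetical; error handling and
 * event reporting omitted): read the channel status, then let the helper
 * above advance the comedi buffer pointers, assuming the CHSR_LINKC status
 * bit from mite.h.
 *
 *      status = mite_get_status(mite_chan);
 *      if (status & CHSR_LINKC)
 *              mite_sync_input_dma(mite_chan, s);
 */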

int mite_sync_output_dma(struct mite_channel *mite_chan,
                         struct comedi_subdevice *s)
{
        struct comedi_async *async = s->async;
        struct comedi_cmd *cmd = &async->cmd;
        u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
        unsigned int old_alloc_count = async->buf_read_alloc_count;
        u32 nbytes_ub, nbytes_lb;
        int count;

        /* read alloc as much as we can */
        comedi_buf_read_alloc(s, async->prealloc_bufsz);
        nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan);
        if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0)
                nbytes_lb = stop_count;
        nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
        if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
                nbytes_ub = stop_count;
        if ((int)(nbytes_ub - old_alloc_count) > 0) {
                dev_warn(s->device->class_dev, "mite: DMA underrun\n");
                async->events |= COMEDI_CB_OVERFLOW;
                return -1;
        }
        count = nbytes_lb - async->buf_read_count;
        if (count <= 0)
                return 0;

        comedi_buf_read_free(s, count);
        async->events |= COMEDI_CB_BLOCK;
        return 0;
}
EXPORT_SYMBOL_GPL(mite_sync_output_dma);

unsigned mite_get_status(struct mite_channel *mite_chan)
{
        struct mite_struct *mite = mite_chan->mite;
        unsigned status;
        unsigned long flags;

        spin_lock_irqsave(&mite->lock, flags);
        status = readl(mite->mite_io_addr + MITE_CHSR(mite_chan->channel));
        if (status & CHSR_DONE) {
                mite_chan->done = 1;
                writel(CHOR_CLRDONE,
                       mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
        }
        mmiowb();
        spin_unlock_irqrestore(&mite->lock, flags);
        return status;
}
EXPORT_SYMBOL_GPL(mite_get_status);

int mite_done(struct mite_channel *mite_chan)
{
        struct mite_struct *mite = mite_chan->mite;
        unsigned long flags;
        int done;

        mite_get_status(mite_chan);
        spin_lock_irqsave(&mite->lock, flags);
        done = mite_chan->done;
        spin_unlock_irqrestore(&mite->lock, flags);
        return done;
}
EXPORT_SYMBOL_GPL(mite_done);

static int __init mite_module_init(void)
{
        return 0;
}

static void __exit mite_module_exit(void)
{
}

module_init(mite_module_init);
module_exit(mite_module_exit);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");