ioat: fix self test interrupts
[cascardo/linux.git] / drivers / dma / ioat / dma.c
1 /*
2  * Intel I/OAT DMA Linux driver
3  * Copyright(c) 2004 - 2009 Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  */
22
23 /*
24  * This driver supports an Intel I/OAT DMA engine, which does asynchronous
25  * copy operations.
26  */
27
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/pci.h>
31 #include <linux/interrupt.h>
32 #include <linux/dmaengine.h>
33 #include <linux/delay.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/workqueue.h>
36 #include <linux/i7300_idle.h>
37 #include "dma.h"
38 #include "registers.h"
39 #include "hw.h"
40
41 static int ioat_pending_level = 4;
42 module_param(ioat_pending_level, int, 0644);
43 MODULE_PARM_DESC(ioat_pending_level,
44                  "high-water mark for pushing ioat descriptors (default: 4)");
45
46 static void ioat_dma_chan_reset_part2(struct work_struct *work);
47 static void ioat_dma_chan_watchdog(struct work_struct *work);
48
49 /* internal functions */
50 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
51 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
52
53 static struct ioat_desc_sw *
54 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
55 static struct ioat_desc_sw *
56 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
57
58 static inline struct ioat_dma_chan *
59 ioat_chan_by_index(struct ioatdma_device *device, int index)
60 {
61         return device->idx[index];
62 }
63
64 /**
65  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
66  * @irq: interrupt id
67  * @data: interrupt data
68  */
69 static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
70 {
71         struct ioatdma_device *instance = data;
72         struct ioat_dma_chan *ioat_chan;
73         unsigned long attnstatus;
74         int bit;
75         u8 intrctrl;
76
77         intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
78
79         if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
80                 return IRQ_NONE;
81
82         if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
83                 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
84                 return IRQ_NONE;
85         }
86
87         attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
88         for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
89                 ioat_chan = ioat_chan_by_index(instance, bit);
90                 tasklet_schedule(&ioat_chan->cleanup_task);
91         }
92
93         writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
94         return IRQ_HANDLED;
95 }
96
97 /**
98  * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
99  * @irq: interrupt id
100  * @data: interrupt data
101  */
102 static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
103 {
104         struct ioat_dma_chan *ioat_chan = data;
105
106         tasklet_schedule(&ioat_chan->cleanup_task);
107
108         return IRQ_HANDLED;
109 }
110
111 static void ioat_dma_cleanup_tasklet(unsigned long data);
112
113 /**
114  * ioat_dma_enumerate_channels - find and initialize the device's channels
115  * @device: the device to be enumerated
116  */
117 static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
118 {
119         u8 xfercap_scale;
120         u32 xfercap;
121         int i;
122         struct ioat_dma_chan *ioat_chan;
123         struct device *dev = &device->pdev->dev;
124         struct dma_device *dma = &device->common;
125
126         INIT_LIST_HEAD(&dma->channels);
127         dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
128         xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
129         xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
130
131 #ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
132         if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
133                 dma->chancnt--;
134 #endif
135         for (i = 0; i < dma->chancnt; i++) {
136                 ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
137                 if (!ioat_chan) {
138                         dma->chancnt = i;
139                         break;
140                 }
141
142                 ioat_chan->device = device;
143                 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
144                 ioat_chan->xfercap = xfercap;
145                 ioat_chan->desccount = 0;
146                 INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
147                 spin_lock_init(&ioat_chan->cleanup_lock);
148                 spin_lock_init(&ioat_chan->desc_lock);
149                 INIT_LIST_HEAD(&ioat_chan->free_desc);
150                 INIT_LIST_HEAD(&ioat_chan->used_desc);
151                 /* This should be made common somewhere in dmaengine.c */
152                 ioat_chan->common.device = &device->common;
153                 list_add_tail(&ioat_chan->common.device_node, &dma->channels);
154                 device->idx[i] = ioat_chan;
155                 tasklet_init(&ioat_chan->cleanup_task,
156                              ioat_dma_cleanup_tasklet,
157                              (unsigned long) ioat_chan);
158                 tasklet_disable(&ioat_chan->cleanup_task);
159         }
160         return dma->chancnt;
161 }
162
163 /**
164  * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
165  *                                 descriptors to hw
166  * @chan: DMA channel handle
167  */
168 static inline void
169 __ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan)
170 {
171         ioat_chan->pending = 0;
172         writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
173 }
174
175 static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
176 {
177         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
178
179         if (ioat_chan->pending > 0) {
180                 spin_lock_bh(&ioat_chan->desc_lock);
181                 __ioat1_dma_memcpy_issue_pending(ioat_chan);
182                 spin_unlock_bh(&ioat_chan->desc_lock);
183         }
184 }
185
186 static inline void
187 __ioat2_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan)
188 {
189         ioat_chan->pending = 0;
190         writew(ioat_chan->dmacount,
191                ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
192 }
193
194 static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
195 {
196         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
197
198         if (ioat_chan->pending > 0) {
199                 spin_lock_bh(&ioat_chan->desc_lock);
200                 __ioat2_dma_memcpy_issue_pending(ioat_chan);
201                 spin_unlock_bh(&ioat_chan->desc_lock);
202         }
203 }
204
205
206 /**
207  * ioat_dma_chan_reset_part2 - reinit the channel after a reset
208  */
209 static void ioat_dma_chan_reset_part2(struct work_struct *work)
210 {
211         struct ioat_dma_chan *ioat_chan =
212                 container_of(work, struct ioat_dma_chan, work.work);
213         struct ioat_desc_sw *desc;
214
215         spin_lock_bh(&ioat_chan->cleanup_lock);
216         spin_lock_bh(&ioat_chan->desc_lock);
217
218         ioat_chan->completion_virt->low = 0;
219         ioat_chan->completion_virt->high = 0;
220         ioat_chan->pending = 0;
221
222         /*
223          * count the descriptors waiting, and be sure to do it
224          * right for both the CB1 line and the CB2 ring
225          */
226         ioat_chan->dmacount = 0;
227         if (ioat_chan->used_desc.prev) {
228                 desc = to_ioat_desc(ioat_chan->used_desc.prev);
229                 do {
230                         ioat_chan->dmacount++;
231                         desc = to_ioat_desc(desc->node.next);
232                 } while (&desc->node != ioat_chan->used_desc.next);
233         }
234
235         /*
236          * write the new starting descriptor address
237          * this puts channel engine into ARMED state
238          */
239         desc = to_ioat_desc(ioat_chan->used_desc.prev);
240         switch (ioat_chan->device->version) {
241         case IOAT_VER_1_2:
242                 writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
243                        ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
244                 writel(((u64) desc->txd.phys) >> 32,
245                        ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
246
247                 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
248                         + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
249                 break;
250         case IOAT_VER_2_0:
251                 writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
252                        ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
253                 writel(((u64) desc->txd.phys) >> 32,
254                        ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
255
256                 /* tell the engine to go with what's left to be done */
257                 writew(ioat_chan->dmacount,
258                        ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
259
260                 break;
261         }
262         dev_err(to_dev(ioat_chan),
263                 "chan%d reset - %d descs waiting, %d total desc\n",
264                 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
265
266         spin_unlock_bh(&ioat_chan->desc_lock);
267         spin_unlock_bh(&ioat_chan->cleanup_lock);
268 }
269
270 /**
271  * ioat_dma_reset_channel - restart a channel
272  * @ioat_chan: IOAT DMA channel handle
273  */
274 static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
275 {
276         u32 chansts, chanerr;
277
278         if (!ioat_chan->used_desc.prev)
279                 return;
280
281         chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
282         chansts = (ioat_chan->completion_virt->low
283                                         & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
284         if (chanerr) {
285                 dev_err(to_dev(ioat_chan),
286                         "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
287                         chan_num(ioat_chan), chansts, chanerr);
288                 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
289         }
290
291         /*
292          * whack it upside the head with a reset
293          * and wait for things to settle out.
294          * force the pending count to a really big negative
295          * to make sure no one forces an issue_pending
296          * while we're waiting.
297          */
298
299         spin_lock_bh(&ioat_chan->desc_lock);
300         ioat_chan->pending = INT_MIN;
301         writeb(IOAT_CHANCMD_RESET,
302                ioat_chan->reg_base
303                + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
304         spin_unlock_bh(&ioat_chan->desc_lock);
305
306         /* schedule the 2nd half instead of sleeping a long time */
307         schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
308 }
309
310 /**
311  * ioat_dma_chan_watchdog - watch for stuck channels
312  */
313 static void ioat_dma_chan_watchdog(struct work_struct *work)
314 {
315         struct ioatdma_device *device =
316                 container_of(work, struct ioatdma_device, work.work);
317         struct ioat_dma_chan *ioat_chan;
318         int i;
319
320         union {
321                 u64 full;
322                 struct {
323                         u32 low;
324                         u32 high;
325                 };
326         } completion_hw;
327         unsigned long compl_desc_addr_hw;
328
329         for (i = 0; i < device->common.chancnt; i++) {
330                 ioat_chan = ioat_chan_by_index(device, i);
331
332                 if (ioat_chan->device->version == IOAT_VER_1_2
333                         /* have we started processing anything yet */
334                     && ioat_chan->last_completion
335                         /* have we completed any since last watchdog cycle? */
336                     && (ioat_chan->last_completion ==
337                                 ioat_chan->watchdog_completion)
338                         /* has TCP stuck on one cookie since last watchdog? */
339                     && (ioat_chan->watchdog_tcp_cookie ==
340                                 ioat_chan->watchdog_last_tcp_cookie)
341                     && (ioat_chan->watchdog_tcp_cookie !=
342                                 ioat_chan->completed_cookie)
343                         /* is there something in the chain to be processed? */
344                         /* CB1 chain always has at least the last one processed */
345                     && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
346                     && ioat_chan->pending == 0) {
347
348                         /*
349                          * check CHANSTS register for completed
350                          * descriptor address.
351                          * if it is different than completion writeback,
352                          * it is not zero
353                          * and it has changed since the last watchdog
354                          *     we can assume that channel
355                          *     is still working correctly
356                          *     and the problem is in completion writeback.
357                          *     update completion writeback
358                          *     with actual CHANSTS value
359                          * else
360                          *     try resetting the channel
361                          */
362
363                         completion_hw.low = readl(ioat_chan->reg_base +
364                                 IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
365                         completion_hw.high = readl(ioat_chan->reg_base +
366                                 IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
367 #if (BITS_PER_LONG == 64)
368                         compl_desc_addr_hw =
369                                 completion_hw.full
370                                 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
371 #else
372                         compl_desc_addr_hw =
373                                 completion_hw.low & IOAT_LOW_COMPLETION_MASK;
374 #endif
375
376                         if ((compl_desc_addr_hw != 0)
377                            && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
378                            && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
379                                 ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
380                                 ioat_chan->completion_virt->low = completion_hw.low;
381                                 ioat_chan->completion_virt->high = completion_hw.high;
382                         } else {
383                                 ioat_dma_reset_channel(ioat_chan);
384                                 ioat_chan->watchdog_completion = 0;
385                                 ioat_chan->last_compl_desc_addr_hw = 0;
386                         }
387
388                 /*
389                  * for version 2.0 if there are descriptors yet to be processed
390                  * and the last completed hasn't changed since the last watchdog
391                  *      if they haven't hit the pending level
392                  *          issue the pending to push them through
393                  *      else
394                  *          try resetting the channel
395                  */
396                 } else if (ioat_chan->device->version == IOAT_VER_2_0
397                     && ioat_chan->used_desc.prev
398                     && ioat_chan->last_completion
399                     && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
400
401                         if (ioat_chan->pending < ioat_pending_level)
402                                 ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
403                         else {
404                                 ioat_dma_reset_channel(ioat_chan);
405                                 ioat_chan->watchdog_completion = 0;
406                         }
407                 } else {
408                         ioat_chan->last_compl_desc_addr_hw = 0;
409                         ioat_chan->watchdog_completion
410                                         = ioat_chan->last_completion;
411                 }
412
413                 ioat_chan->watchdog_last_tcp_cookie =
414                         ioat_chan->watchdog_tcp_cookie;
415         }
416
417         schedule_delayed_work(&device->work, WATCHDOG_DELAY);
418 }
419
420 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
421 {
422         struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
423         struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
424         struct ioat_desc_sw *first;
425         struct ioat_desc_sw *chain_tail;
426         dma_cookie_t cookie;
427
428         spin_lock_bh(&ioat_chan->desc_lock);
429         /* cookie incr and addition to used_list must be atomic */
430         cookie = ioat_chan->common.cookie;
431         cookie++;
432         if (cookie < 0)
433                 cookie = 1;
434         ioat_chan->common.cookie = tx->cookie = cookie;
435
436         /* write address into NextDescriptor field of last desc in chain */
437         first = to_ioat_desc(tx->tx_list.next);
438         chain_tail = to_ioat_desc(ioat_chan->used_desc.prev);
439         /* make descriptor updates globally visible before chaining */
440         wmb();
441         chain_tail->hw->next = first->txd.phys;
442         list_splice_tail_init(&tx->tx_list, &ioat_chan->used_desc);
443
444         ioat_chan->dmacount += desc->tx_cnt;
445         ioat_chan->pending += desc->tx_cnt;
446         if (ioat_chan->pending >= ioat_pending_level)
447                 __ioat1_dma_memcpy_issue_pending(ioat_chan);
448         spin_unlock_bh(&ioat_chan->desc_lock);
449
450         return cookie;
451 }
452
453 static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
454 {
455         struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
456         struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
457         struct ioat_desc_sw *new;
458         struct ioat_dma_descriptor *hw;
459         dma_cookie_t cookie;
460         u32 copy;
461         size_t len;
462         dma_addr_t src, dst;
463         unsigned long orig_flags;
464         unsigned int desc_count = 0;
465
466         /* src and dest and len are stored in the initial descriptor */
467         len = first->len;
468         src = first->src;
469         dst = first->dst;
470         orig_flags = first->txd.flags;
471         new = first;
472
473         /*
474          * ioat_chan->desc_lock is still in force in version 2 path
475          * it gets unlocked at end of this function
476          */
477         do {
478                 copy = min_t(size_t, len, ioat_chan->xfercap);
479
480                 async_tx_ack(&new->txd);
481
482                 hw = new->hw;
483                 hw->size = copy;
484                 hw->ctl = 0;
485                 hw->src_addr = src;
486                 hw->dst_addr = dst;
487
488                 len -= copy;
489                 dst += copy;
490                 src += copy;
491                 desc_count++;
492         } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
493
494         if (!new) {
495                 dev_err(to_dev(ioat_chan), "tx submit failed\n");
496                 spin_unlock_bh(&ioat_chan->desc_lock);
497                 return -ENOMEM;
498         }
499
500         hw->ctl_f.compl_write = 1;
501         if (first->txd.callback) {
502                 hw->ctl_f.int_en = 1;
503                 if (first != new) {
504                         /* move callback into to last desc */
505                         new->txd.callback = first->txd.callback;
506                         new->txd.callback_param
507                                         = first->txd.callback_param;
508                         first->txd.callback = NULL;
509                         first->txd.callback_param = NULL;
510                 }
511         }
512
513         new->tx_cnt = desc_count;
514         new->txd.flags = orig_flags; /* client is in control of this ack */
515
516         /* store the original values for use in later cleanup */
517         if (new != first) {
518                 new->src = first->src;
519                 new->dst = first->dst;
520                 new->len = first->len;
521         }
522
523         /* cookie incr and addition to used_list must be atomic */
524         cookie = ioat_chan->common.cookie;
525         cookie++;
526         if (cookie < 0)
527                 cookie = 1;
528         ioat_chan->common.cookie = new->txd.cookie = cookie;
529
530         ioat_chan->dmacount += desc_count;
531         ioat_chan->pending += desc_count;
532         if (ioat_chan->pending >= ioat_pending_level)
533                 __ioat2_dma_memcpy_issue_pending(ioat_chan);
534         spin_unlock_bh(&ioat_chan->desc_lock);
535
536         return cookie;
537 }
538
539 /**
540  * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
541  * @ioat_chan: the channel supplying the memory pool for the descriptors
542  * @flags: allocation flags
543  */
544 static struct ioat_desc_sw *
545 ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat_chan, gfp_t flags)
546 {
547         struct ioat_dma_descriptor *desc;
548         struct ioat_desc_sw *desc_sw;
549         struct ioatdma_device *ioatdma_device;
550         dma_addr_t phys;
551
552         ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
553         desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
554         if (unlikely(!desc))
555                 return NULL;
556
557         desc_sw = kzalloc(sizeof(*desc_sw), flags);
558         if (unlikely(!desc_sw)) {
559                 pci_pool_free(ioatdma_device->dma_pool, desc, phys);
560                 return NULL;
561         }
562
563         memset(desc, 0, sizeof(*desc));
564         dma_async_tx_descriptor_init(&desc_sw->txd, &ioat_chan->common);
565         switch (ioat_chan->device->version) {
566         case IOAT_VER_1_2:
567                 desc_sw->txd.tx_submit = ioat1_tx_submit;
568                 break;
569         case IOAT_VER_2_0:
570         case IOAT_VER_3_0:
571                 desc_sw->txd.tx_submit = ioat2_tx_submit;
572                 break;
573         }
574
575         desc_sw->hw = desc;
576         desc_sw->txd.phys = phys;
577
578         return desc_sw;
579 }
580
581 static int ioat_initial_desc_count = 256;
582 module_param(ioat_initial_desc_count, int, 0644);
583 MODULE_PARM_DESC(ioat_initial_desc_count,
584                  "initial descriptors per channel (default: 256)");
585
586 /**
587  * ioat2_dma_massage_chan_desc - link the descriptors into a circle
588  * @ioat_chan: the channel to be massaged
589  */
590 static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
591 {
592         struct ioat_desc_sw *desc, *_desc;
593
594         /* setup used_desc */
595         ioat_chan->used_desc.next = ioat_chan->free_desc.next;
596         ioat_chan->used_desc.prev = NULL;
597
598         /* pull free_desc out of the circle so that every node is a hw
599          * descriptor, but leave it pointing to the list
600          */
601         ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
602         ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
603
604         /* circle link the hw descriptors */
605         desc = to_ioat_desc(ioat_chan->free_desc.next);
606         desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys;
607         list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
608                 desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys;
609         }
610 }
611
612 /**
613  * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
614  * @chan: the channel to be filled out
615  */
616 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
617 {
618         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
619         struct ioat_desc_sw *desc;
620         u16 chanctrl;
621         u32 chanerr;
622         int i;
623         LIST_HEAD(tmp_list);
624
625         /* have we already been set up? */
626         if (!list_empty(&ioat_chan->free_desc))
627                 return ioat_chan->desccount;
628
629         /* Setup register to interrupt and write completion status on error */
630         chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
631                 IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
632                 IOAT_CHANCTRL_ERR_COMPLETION_EN;
633         writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
634
635         chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
636         if (chanerr) {
637                 dev_err(to_dev(ioat_chan), "CHANERR = %x, clearing\n", chanerr);
638                 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
639         }
640
641         /* Allocate descriptors */
642         for (i = 0; i < ioat_initial_desc_count; i++) {
643                 desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
644                 if (!desc) {
645                         dev_err(to_dev(ioat_chan),
646                                 "Only %d initial descriptors\n", i);
647                         break;
648                 }
649                 list_add_tail(&desc->node, &tmp_list);
650         }
651         spin_lock_bh(&ioat_chan->desc_lock);
652         ioat_chan->desccount = i;
653         list_splice(&tmp_list, &ioat_chan->free_desc);
654         if (ioat_chan->device->version != IOAT_VER_1_2)
655                 ioat2_dma_massage_chan_desc(ioat_chan);
656         spin_unlock_bh(&ioat_chan->desc_lock);
657
658         /* allocate a completion writeback area */
659         /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
660         ioat_chan->completion_virt =
661                 pci_pool_alloc(ioat_chan->device->completion_pool,
662                                GFP_KERNEL,
663                                &ioat_chan->completion_addr);
664         memset(ioat_chan->completion_virt, 0,
665                sizeof(*ioat_chan->completion_virt));
666         writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
667                ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
668         writel(((u64) ioat_chan->completion_addr) >> 32,
669                ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
670
671         tasklet_enable(&ioat_chan->cleanup_task);
672         ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
673         return ioat_chan->desccount;
674 }
675
676 /**
677  * ioat_dma_free_chan_resources - release all the descriptors
678  * @chan: the channel to be cleaned
679  */
680 static void ioat_dma_free_chan_resources(struct dma_chan *chan)
681 {
682         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
683         struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
684         struct ioat_desc_sw *desc, *_desc;
685         int in_use_descs = 0;
686
687         /* Before freeing channel resources first check
688          * if they have been previously allocated for this channel.
689          */
690         if (ioat_chan->desccount == 0)
691                 return;
692
693         tasklet_disable(&ioat_chan->cleanup_task);
694         ioat_dma_memcpy_cleanup(ioat_chan);
695
696         /* Delay 100ms after reset to allow internal DMA logic to quiesce
697          * before removing DMA descriptor resources.
698          */
699         writeb(IOAT_CHANCMD_RESET,
700                ioat_chan->reg_base
701                         + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
702         mdelay(100);
703
704         spin_lock_bh(&ioat_chan->desc_lock);
705         switch (ioat_chan->device->version) {
706         case IOAT_VER_1_2:
707                 list_for_each_entry_safe(desc, _desc,
708                                          &ioat_chan->used_desc, node) {
709                         in_use_descs++;
710                         list_del(&desc->node);
711                         pci_pool_free(ioatdma_device->dma_pool, desc->hw,
712                                       desc->txd.phys);
713                         kfree(desc);
714                 }
715                 list_for_each_entry_safe(desc, _desc,
716                                          &ioat_chan->free_desc, node) {
717                         list_del(&desc->node);
718                         pci_pool_free(ioatdma_device->dma_pool, desc->hw,
719                                       desc->txd.phys);
720                         kfree(desc);
721                 }
722                 break;
723         case IOAT_VER_2_0:
724         case IOAT_VER_3_0:
725                 list_for_each_entry_safe(desc, _desc,
726                                          ioat_chan->free_desc.next, node) {
727                         list_del(&desc->node);
728                         pci_pool_free(ioatdma_device->dma_pool, desc->hw,
729                                       desc->txd.phys);
730                         kfree(desc);
731                 }
732                 desc = to_ioat_desc(ioat_chan->free_desc.next);
733                 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
734                               desc->txd.phys);
735                 kfree(desc);
736                 INIT_LIST_HEAD(&ioat_chan->free_desc);
737                 INIT_LIST_HEAD(&ioat_chan->used_desc);
738                 break;
739         }
740         spin_unlock_bh(&ioat_chan->desc_lock);
741
742         pci_pool_free(ioatdma_device->completion_pool,
743                       ioat_chan->completion_virt,
744                       ioat_chan->completion_addr);
745
746         /* one is ok since we left it on there on purpose */
747         if (in_use_descs > 1)
748                 dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
749                         in_use_descs - 1);
750
751         ioat_chan->last_completion = ioat_chan->completion_addr = 0;
752         ioat_chan->pending = 0;
753         ioat_chan->dmacount = 0;
754         ioat_chan->desccount = 0;
755         ioat_chan->watchdog_completion = 0;
756         ioat_chan->last_compl_desc_addr_hw = 0;
757         ioat_chan->watchdog_tcp_cookie =
758                 ioat_chan->watchdog_last_tcp_cookie = 0;
759 }
760
761 /**
762  * ioat_dma_get_next_descriptor - return the next available descriptor
763  * @ioat_chan: IOAT DMA channel handle
764  *
765  * Gets the next descriptor from the chain, and must be called with the
766  * channel's desc_lock held.  Allocates more descriptors if the channel
767  * has run out.
768  */
769 static struct ioat_desc_sw *
770 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
771 {
772         struct ioat_desc_sw *new;
773
774         if (!list_empty(&ioat_chan->free_desc)) {
775                 new = to_ioat_desc(ioat_chan->free_desc.next);
776                 list_del(&new->node);
777         } else {
778                 /* try to get another desc */
779                 new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
780                 if (!new) {
781                         dev_err(to_dev(ioat_chan), "alloc failed\n");
782                         return NULL;
783                 }
784         }
785
786         prefetch(new->hw);
787         return new;
788 }
789
790 static struct ioat_desc_sw *
791 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
792 {
793         struct ioat_desc_sw *new;
794
795         /*
796          * used.prev points to where to start processing
797          * used.next points to next free descriptor
798          * if used.prev == NULL, there are none waiting to be processed
799          * if used.next == used.prev.prev, there is only one free descriptor,
800          *      and we need to use it to as a noop descriptor before
801          *      linking in a new set of descriptors, since the device
802          *      has probably already read the pointer to it
803          */
804         if (ioat_chan->used_desc.prev &&
805             ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
806
807                 struct ioat_desc_sw *desc;
808                 struct ioat_desc_sw *noop_desc;
809                 int i;
810
811                 /* set up the noop descriptor */
812                 noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
813                 /* set size to non-zero value (channel returns error when size is 0) */
814                 noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
815                 noop_desc->hw->ctl = 0;
816                 noop_desc->hw->ctl_f.null = 1;
817                 noop_desc->hw->src_addr = 0;
818                 noop_desc->hw->dst_addr = 0;
819
820                 ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
821                 ioat_chan->pending++;
822                 ioat_chan->dmacount++;
823
824                 /* try to get a few more descriptors */
825                 for (i = 16; i; i--) {
826                         desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
827                         if (!desc) {
828                                 dev_err(to_dev(ioat_chan), "alloc failed\n");
829                                 break;
830                         }
831                         list_add_tail(&desc->node, ioat_chan->used_desc.next);
832
833                         desc->hw->next
834                                 = to_ioat_desc(desc->node.next)->txd.phys;
835                         to_ioat_desc(desc->node.prev)->hw->next
836                                 = desc->txd.phys;
837                         ioat_chan->desccount++;
838                 }
839
840                 ioat_chan->used_desc.next = noop_desc->node.next;
841         }
842         new = to_ioat_desc(ioat_chan->used_desc.next);
843         prefetch(new);
844         ioat_chan->used_desc.next = new->node.next;
845
846         if (ioat_chan->used_desc.prev == NULL)
847                 ioat_chan->used_desc.prev = &new->node;
848
849         prefetch(new->hw);
850         return new;
851 }
852
853 static struct ioat_desc_sw *
854 ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
855 {
856         if (!ioat_chan)
857                 return NULL;
858
859         switch (ioat_chan->device->version) {
860         case IOAT_VER_1_2:
861                 return ioat1_dma_get_next_descriptor(ioat_chan);
862         case IOAT_VER_2_0:
863         case IOAT_VER_3_0:
864                 return ioat2_dma_get_next_descriptor(ioat_chan);
865         }
866         return NULL;
867 }
868
869 static struct dma_async_tx_descriptor *
870 ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
871                       dma_addr_t dma_src, size_t len, unsigned long flags)
872 {
873         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
874         struct ioat_desc_sw *desc;
875         size_t copy;
876         LIST_HEAD(chain);
877         dma_addr_t src = dma_src;
878         dma_addr_t dest = dma_dest;
879         size_t total_len = len;
880         struct ioat_dma_descriptor *hw = NULL;
881         int tx_cnt = 0;
882
883         spin_lock_bh(&ioat_chan->desc_lock);
884         desc = ioat_dma_get_next_descriptor(ioat_chan);
885         do {
886                 if (!desc)
887                         break;
888
889                 tx_cnt++;
890                 copy = min_t(size_t, len, ioat_chan->xfercap);
891
892                 hw = desc->hw;
893                 hw->size = copy;
894                 hw->ctl = 0;
895                 hw->src_addr = src;
896                 hw->dst_addr = dest;
897
898                 list_add_tail(&desc->node, &chain);
899
900                 len -= copy;
901                 dest += copy;
902                 src += copy;
903                 if (len) {
904                         struct ioat_desc_sw *next;
905
906                         async_tx_ack(&desc->txd);
907                         next = ioat_dma_get_next_descriptor(ioat_chan);
908                         hw->next = next ? next->txd.phys : 0;
909                         desc = next;
910                 } else
911                         hw->next = 0;
912         } while (len);
913
914         if (!desc) {
915                 dev_err(to_dev(ioat_chan),
916                         "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
917                         chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
918                 list_splice(&chain, &ioat_chan->free_desc);
919                 spin_unlock_bh(&ioat_chan->desc_lock);
920                 return NULL;
921         }
922         spin_unlock_bh(&ioat_chan->desc_lock);
923
924         desc->txd.flags = flags;
925         desc->tx_cnt = tx_cnt;
926         desc->src = dma_src;
927         desc->dst = dma_dest;
928         desc->len = total_len;
929         list_splice(&chain, &desc->txd.tx_list);
930         hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
931         hw->ctl_f.compl_write = 1;
932
933         return &desc->txd;
934 }
935
936 static struct dma_async_tx_descriptor *
937 ioat2_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
938                       dma_addr_t dma_src, size_t len, unsigned long flags)
939 {
940         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
941         struct ioat_desc_sw *new;
942
943         spin_lock_bh(&ioat_chan->desc_lock);
944         new = ioat2_dma_get_next_descriptor(ioat_chan);
945
946         /*
947          * leave ioat_chan->desc_lock set in ioat 2 path
948          * it will get unlocked at end of tx_submit
949          */
950
951         if (new) {
952                 new->len = len;
953                 new->dst = dma_dest;
954                 new->src = dma_src;
955                 new->txd.flags = flags;
956                 return &new->txd;
957         } else {
958                 spin_unlock_bh(&ioat_chan->desc_lock);
959                 dev_err(to_dev(ioat_chan),
960                         "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
961                         chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
962                 return NULL;
963         }
964 }
965
966 static void ioat_dma_cleanup_tasklet(unsigned long data)
967 {
968         struct ioat_dma_chan *chan = (void *)data;
969         ioat_dma_memcpy_cleanup(chan);
970         writew(IOAT_CHANCTRL_INT_DISABLE,
971                chan->reg_base + IOAT_CHANCTRL_OFFSET);
972 }
973
974 static void
975 ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
976 {
977         if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
978                 if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
979                         pci_unmap_single(ioat_chan->device->pdev,
980                                          pci_unmap_addr(desc, dst),
981                                          pci_unmap_len(desc, len),
982                                          PCI_DMA_FROMDEVICE);
983                 else
984                         pci_unmap_page(ioat_chan->device->pdev,
985                                        pci_unmap_addr(desc, dst),
986                                        pci_unmap_len(desc, len),
987                                        PCI_DMA_FROMDEVICE);
988         }
989
990         if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
991                 if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
992                         pci_unmap_single(ioat_chan->device->pdev,
993                                          pci_unmap_addr(desc, src),
994                                          pci_unmap_len(desc, len),
995                                          PCI_DMA_TODEVICE);
996                 else
997                         pci_unmap_page(ioat_chan->device->pdev,
998                                        pci_unmap_addr(desc, src),
999                                        pci_unmap_len(desc, len),
1000                                        PCI_DMA_TODEVICE);
1001         }
1002 }
1003
1004 /**
1005  * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
1006  * @chan: ioat channel to be cleaned up
1007  */
1008 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
1009 {
1010         unsigned long phys_complete;
1011         struct ioat_desc_sw *desc, *_desc;
1012         dma_cookie_t cookie = 0;
1013         unsigned long desc_phys;
1014         struct ioat_desc_sw *latest_desc;
1015         struct dma_async_tx_descriptor *tx;
1016
1017         prefetch(ioat_chan->completion_virt);
1018
1019         if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
1020                 return;
1021
1022         /* The completion writeback can happen at any time,
1023            so reads by the driver need to be atomic operations
1024            The descriptor physical addresses are limited to 32-bits
1025            when the CPU can only do a 32-bit mov */
1026
1027 #if (BITS_PER_LONG == 64)
1028         phys_complete =
1029                 ioat_chan->completion_virt->full
1030                 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
1031 #else
1032         phys_complete =
1033                 ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
1034 #endif
1035
1036         if ((ioat_chan->completion_virt->full
1037                 & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
1038                                 IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
1039                 dev_err(to_dev(ioat_chan), "Channel halted, chanerr = %x\n",
1040                         readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
1041
1042                 /* TODO do something to salvage the situation */
1043         }
1044
1045         if (phys_complete == ioat_chan->last_completion) {
1046                 spin_unlock_bh(&ioat_chan->cleanup_lock);
1047                 /*
1048                  * perhaps we're stuck so hard that the watchdog can't go off?
1049                  * try to catch it after 2 seconds
1050                  */
1051                 if (ioat_chan->device->version != IOAT_VER_3_0) {
1052                         if (time_after(jiffies,
1053                                        ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
1054                                 ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
1055                                 ioat_chan->last_completion_time = jiffies;
1056                         }
1057                 }
1058                 return;
1059         }
1060         ioat_chan->last_completion_time = jiffies;
1061
1062         cookie = 0;
1063         if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
1064                 spin_unlock_bh(&ioat_chan->cleanup_lock);
1065                 return;
1066         }
1067
1068         switch (ioat_chan->device->version) {
1069         case IOAT_VER_1_2:
1070                 list_for_each_entry_safe(desc, _desc,
1071                                          &ioat_chan->used_desc, node) {
1072                         tx = &desc->txd;
1073                         /*
1074                          * Incoming DMA requests may use multiple descriptors,
1075                          * due to exceeding xfercap, perhaps. If so, only the
1076                          * last one will have a cookie, and require unmapping.
1077                          */
1078                         if (tx->cookie) {
1079                                 cookie = tx->cookie;
1080                                 ioat_dma_unmap(ioat_chan, desc);
1081                                 if (tx->callback) {
1082                                         tx->callback(tx->callback_param);
1083                                         tx->callback = NULL;
1084                                 }
1085                         }
1086
1087                         if (tx->phys != phys_complete) {
1088                                 /*
1089                                  * a completed entry, but not the last, so clean
1090                                  * up if the client is done with the descriptor
1091                                  */
1092                                 if (async_tx_test_ack(tx)) {
1093                                         list_move_tail(&desc->node,
1094                                                        &ioat_chan->free_desc);
1095                                 } else
1096                                         tx->cookie = 0;
1097                         } else {
1098                                 /*
1099                                  * last used desc. Do not remove, so we can
1100                                  * append from it, but don't look at it next
1101                                  * time, either
1102                                  */
1103                                 tx->cookie = 0;
1104
1105                                 /* TODO check status bits? */
1106                                 break;
1107                         }
1108                 }
1109                 break;
1110         case IOAT_VER_2_0:
1111         case IOAT_VER_3_0:
1112                 /* has some other thread has already cleaned up? */
1113                 if (ioat_chan->used_desc.prev == NULL)
1114                         break;
1115
1116                 /* work backwards to find latest finished desc */
1117                 desc = to_ioat_desc(ioat_chan->used_desc.next);
1118                 tx = &desc->txd;
1119                 latest_desc = NULL;
1120                 do {
1121                         desc = to_ioat_desc(desc->node.prev);
1122                         desc_phys = (unsigned long)tx->phys
1123                                        & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
1124                         if (desc_phys == phys_complete) {
1125                                 latest_desc = desc;
1126                                 break;
1127                         }
1128                 } while (&desc->node != ioat_chan->used_desc.prev);
1129
1130                 if (latest_desc != NULL) {
1131                         /* work forwards to clear finished descriptors */
1132                         for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
1133                              &desc->node != latest_desc->node.next &&
1134                              &desc->node != ioat_chan->used_desc.next;
1135                              desc = to_ioat_desc(desc->node.next)) {
1136                                 if (tx->cookie) {
1137                                         cookie = tx->cookie;
1138                                         tx->cookie = 0;
1139                                         ioat_dma_unmap(ioat_chan, desc);
1140                                         if (tx->callback) {
1141                                                 tx->callback(tx->callback_param);
1142                                                 tx->callback = NULL;
1143                                         }
1144                                 }
1145                         }
1146
1147                         /* move used.prev up beyond those that are finished */
1148                         if (&desc->node == ioat_chan->used_desc.next)
1149                                 ioat_chan->used_desc.prev = NULL;
1150                         else
1151                                 ioat_chan->used_desc.prev = &desc->node;
1152                 }
1153                 break;
1154         }
1155
1156         spin_unlock_bh(&ioat_chan->desc_lock);
1157
1158         ioat_chan->last_completion = phys_complete;
1159         if (cookie != 0)
1160                 ioat_chan->completed_cookie = cookie;
1161
1162         spin_unlock_bh(&ioat_chan->cleanup_lock);
1163 }
1164
1165 /**
1166  * ioat_dma_is_complete - poll the status of a IOAT DMA transaction
1167  * @chan: IOAT DMA channel handle
1168  * @cookie: DMA transaction identifier
1169  * @done: if not %NULL, updated with last completed transaction
1170  * @used: if not %NULL, updated with last used transaction
1171  */
1172 static enum dma_status
1173 ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie,
1174                      dma_cookie_t *done, dma_cookie_t *used)
1175 {
1176         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
1177         dma_cookie_t last_used;
1178         dma_cookie_t last_complete;
1179         enum dma_status ret;
1180
1181         last_used = chan->cookie;
1182         last_complete = ioat_chan->completed_cookie;
1183         ioat_chan->watchdog_tcp_cookie = cookie;
1184
1185         if (done)
1186                 *done = last_complete;
1187         if (used)
1188                 *used = last_used;
1189
1190         ret = dma_async_is_complete(cookie, last_complete, last_used);
1191         if (ret == DMA_SUCCESS)
1192                 return ret;
1193
1194         ioat_dma_memcpy_cleanup(ioat_chan);
1195
1196         last_used = chan->cookie;
1197         last_complete = ioat_chan->completed_cookie;
1198
1199         if (done)
1200                 *done = last_complete;
1201         if (used)
1202                 *used = last_used;
1203
1204         return dma_async_is_complete(cookie, last_complete, last_used);
1205 }
1206
1207 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
1208 {
1209         struct ioat_desc_sw *desc;
1210         struct ioat_dma_descriptor *hw;
1211
1212         spin_lock_bh(&ioat_chan->desc_lock);
1213
1214         desc = ioat_dma_get_next_descriptor(ioat_chan);
1215
1216         if (!desc) {
1217                 dev_err(to_dev(ioat_chan),
1218                         "Unable to start null desc - get next desc failed\n");
1219                 spin_unlock_bh(&ioat_chan->desc_lock);
1220                 return;
1221         }
1222
1223         hw = desc->hw;
1224         hw->ctl = 0;
1225         hw->ctl_f.null = 1;
1226         hw->ctl_f.int_en = 1;
1227         hw->ctl_f.compl_write = 1;
1228         /* set size to non-zero value (channel returns error when size is 0) */
1229         hw->size = NULL_DESC_BUFFER_SIZE;
1230         hw->src_addr = 0;
1231         hw->dst_addr = 0;
1232         async_tx_ack(&desc->txd);
1233         switch (ioat_chan->device->version) {
1234         case IOAT_VER_1_2:
1235                 hw->next = 0;
1236                 list_add_tail(&desc->node, &ioat_chan->used_desc);
1237
1238                 writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
1239                        ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
1240                 writel(((u64) desc->txd.phys) >> 32,
1241                        ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
1242
1243                 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
1244                         + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
1245                 break;
1246         case IOAT_VER_2_0:
1247         case IOAT_VER_3_0:
1248                 writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
1249                        ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
1250                 writel(((u64) desc->txd.phys) >> 32,
1251                        ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
1252
1253                 ioat_chan->dmacount++;
1254                 __ioat2_dma_memcpy_issue_pending(ioat_chan);
1255                 break;
1256         }
1257         spin_unlock_bh(&ioat_chan->desc_lock);
1258 }
1259
1260 /*
1261  * Perform a IOAT transaction to verify the HW works.
1262  */
1263 #define IOAT_TEST_SIZE 2000
1264
1265 static void ioat_dma_test_callback(void *dma_async_param)
1266 {
1267         struct completion *cmp = dma_async_param;
1268
1269         complete(cmp);
1270 }
1271
1272 /**
1273  * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works.
1274  * @device: device to be tested
1275  */
1276 static int ioat_dma_self_test(struct ioatdma_device *device)
1277 {
1278         int i;
1279         u8 *src;
1280         u8 *dest;
1281         struct dma_device *dma = &device->common;
1282         struct device *dev = &device->pdev->dev;
1283         struct dma_chan *dma_chan;
1284         struct dma_async_tx_descriptor *tx;
1285         dma_addr_t dma_dest, dma_src;
1286         dma_cookie_t cookie;
1287         int err = 0;
1288         struct completion cmp;
1289         unsigned long tmo;
1290         unsigned long flags;
1291
1292         src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1293         if (!src)
1294                 return -ENOMEM;
1295         dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1296         if (!dest) {
1297                 kfree(src);
1298                 return -ENOMEM;
1299         }
1300
1301         /* Fill in src buffer */
1302         for (i = 0; i < IOAT_TEST_SIZE; i++)
1303                 src[i] = (u8)i;
1304
1305         /* Start copy, using first DMA channel */
1306         dma_chan = container_of(dma->channels.next, struct dma_chan,
1307                                 device_node);
1308         if (dma->device_alloc_chan_resources(dma_chan) < 1) {
1309                 dev_err(dev, "selftest cannot allocate chan resource\n");
1310                 err = -ENODEV;
1311                 goto out;
1312         }
1313
1314         dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
1315         dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
1316         flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
1317                 DMA_PREP_INTERRUPT;
1318         tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
1319                                                    IOAT_TEST_SIZE, flags);
1320         if (!tx) {
1321                 dev_err(dev, "Self-test prep failed, disabling\n");
1322                 err = -ENODEV;
1323                 goto free_resources;
1324         }
1325
1326         async_tx_ack(tx);
1327         init_completion(&cmp);
1328         tx->callback = ioat_dma_test_callback;
1329         tx->callback_param = &cmp;
1330         cookie = tx->tx_submit(tx);
1331         if (cookie < 0) {
1332                 dev_err(dev, "Self-test setup failed, disabling\n");
1333                 err = -ENODEV;
1334                 goto free_resources;
1335         }
1336         dma->device_issue_pending(dma_chan);
1337
1338         tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1339
1340         if (tmo == 0 ||
1341             dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
1342                                         != DMA_SUCCESS) {
1343                 dev_err(dev, "Self-test copy timed out, disabling\n");
1344                 err = -ENODEV;
1345                 goto free_resources;
1346         }
1347         if (memcmp(src, dest, IOAT_TEST_SIZE)) {
1348                 dev_err(dev, "Self-test copy failed compare, disabling\n");
1349                 err = -ENODEV;
1350                 goto free_resources;
1351         }
1352
1353 free_resources:
1354         dma->device_free_chan_resources(dma_chan);
1355 out:
1356         kfree(src);
1357         kfree(dest);
1358         return err;
1359 }
1360
1361 static char ioat_interrupt_style[32] = "msix";
1362 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
1363                     sizeof(ioat_interrupt_style), 0644);
1364 MODULE_PARM_DESC(ioat_interrupt_style,
1365                  "set ioat interrupt style: msix (default), "
1366                  "msix-single-vector, msi, intx)");
1367
1368 /**
1369  * ioat_dma_setup_interrupts - setup interrupt handler
1370  * @device: ioat device
1371  */
1372 static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
1373 {
1374         struct ioat_dma_chan *ioat_chan;
1375         struct pci_dev *pdev = device->pdev;
1376         struct device *dev = &pdev->dev;
1377         struct msix_entry *msix;
1378         int i, j, msixcnt;
1379         int err = -EINVAL;
1380         u8 intrctrl = 0;
1381
1382         if (!strcmp(ioat_interrupt_style, "msix"))
1383                 goto msix;
1384         if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
1385                 goto msix_single_vector;
1386         if (!strcmp(ioat_interrupt_style, "msi"))
1387                 goto msi;
1388         if (!strcmp(ioat_interrupt_style, "intx"))
1389                 goto intx;
1390         dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
1391         goto err_no_irq;
1392
1393 msix:
1394         /* The number of MSI-X vectors should equal the number of channels */
1395         msixcnt = device->common.chancnt;
1396         for (i = 0; i < msixcnt; i++)
1397                 device->msix_entries[i].entry = i;
1398
1399         err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
1400         if (err < 0)
1401                 goto msi;
1402         if (err > 0)
1403                 goto msix_single_vector;
1404
1405         for (i = 0; i < msixcnt; i++) {
1406                 msix = &device->msix_entries[i];
1407                 ioat_chan = ioat_chan_by_index(device, i);
1408                 err = devm_request_irq(dev, msix->vector,
1409                                        ioat_dma_do_interrupt_msix, 0,
1410                                        "ioat-msix", ioat_chan);
1411                 if (err) {
1412                         for (j = 0; j < i; j++) {
1413                                 msix = &device->msix_entries[j];
1414                                 ioat_chan = ioat_chan_by_index(device, j);
1415                                 devm_free_irq(dev, msix->vector, ioat_chan);
1416                         }
1417                         goto msix_single_vector;
1418                 }
1419         }
1420         intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
1421         goto done;
1422
1423 msix_single_vector:
1424         msix = &device->msix_entries[0];
1425         msix->entry = 0;
1426         err = pci_enable_msix(pdev, device->msix_entries, 1);
1427         if (err)
1428                 goto msi;
1429
1430         err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
1431                                "ioat-msix", device);
1432         if (err) {
1433                 pci_disable_msix(pdev);
1434                 goto msi;
1435         }
1436         goto done;
1437
1438 msi:
1439         err = pci_enable_msi(pdev);
1440         if (err)
1441                 goto intx;
1442
1443         err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
1444                                "ioat-msi", device);
1445         if (err) {
1446                 pci_disable_msi(pdev);
1447                 goto intx;
1448         }
1449         goto done;
1450
1451 intx:
1452         err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
1453                                IRQF_SHARED, "ioat-intx", device);
1454         if (err)
1455                 goto err_no_irq;
1456
1457 done:
1458         if (device->intr_quirk)
1459                 device->intr_quirk(device);
1460         intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
1461         writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
1462         return 0;
1463
1464 err_no_irq:
1465         /* Disable all interrupt generation */
1466         writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
1467         dev_err(dev, "no usable interrupts\n");
1468         return err;
1469 }
1470
1471 static void ioat_disable_interrupts(struct ioatdma_device *device)
1472 {
1473         /* Disable all interrupt generation */
1474         writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
1475 }
1476
1477 static int ioat_probe(struct ioatdma_device *device)
1478 {
1479         int err = -ENODEV;
1480         struct dma_device *dma = &device->common;
1481         struct pci_dev *pdev = device->pdev;
1482         struct device *dev = &pdev->dev;
1483
1484         /* DMA coherent memory pool for DMA descriptor allocations */
1485         device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
1486                                            sizeof(struct ioat_dma_descriptor),
1487                                            64, 0);
1488         if (!device->dma_pool) {
1489                 err = -ENOMEM;
1490                 goto err_dma_pool;
1491         }
1492
1493         device->completion_pool = pci_pool_create("completion_pool", pdev,
1494                                                   sizeof(u64), SMP_CACHE_BYTES,
1495                                                   SMP_CACHE_BYTES);
1496         if (!device->completion_pool) {
1497                 err = -ENOMEM;
1498                 goto err_completion_pool;
1499         }
1500
1501         ioat_dma_enumerate_channels(device);
1502
1503         dma_cap_set(DMA_MEMCPY, dma->cap_mask);
1504         dma->device_alloc_chan_resources = ioat_dma_alloc_chan_resources;
1505         dma->device_free_chan_resources = ioat_dma_free_chan_resources;
1506         dma->device_is_tx_complete = ioat_dma_is_complete;
1507         dma->dev = &pdev->dev;
1508
1509         dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
1510                 " %d channels, device version 0x%02x, driver version %s\n",
1511                 dma->chancnt, device->version, IOAT_DMA_VERSION);
1512
1513         if (!dma->chancnt) {
1514                 dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
1515                         "zero channels detected\n");
1516                 goto err_setup_interrupts;
1517         }
1518
1519         err = ioat_dma_setup_interrupts(device);
1520         if (err)
1521                 goto err_setup_interrupts;
1522
1523         err = ioat_dma_self_test(device);
1524         if (err)
1525                 goto err_self_test;
1526
1527         return 0;
1528
1529 err_self_test:
1530         ioat_disable_interrupts(device);
1531 err_setup_interrupts:
1532         pci_pool_destroy(device->completion_pool);
1533 err_completion_pool:
1534         pci_pool_destroy(device->dma_pool);
1535 err_dma_pool:
1536         return err;
1537 }
1538
1539 static int ioat_register(struct ioatdma_device *device)
1540 {
1541         int err = dma_async_device_register(&device->common);
1542
1543         if (err) {
1544                 ioat_disable_interrupts(device);
1545                 pci_pool_destroy(device->completion_pool);
1546                 pci_pool_destroy(device->dma_pool);
1547         }
1548
1549         return err;
1550 }
1551
1552 /* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
1553 static void ioat1_intr_quirk(struct ioatdma_device *device)
1554 {
1555         struct pci_dev *pdev = device->pdev;
1556         u32 dmactrl;
1557
1558         pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
1559         if (pdev->msi_enabled)
1560                 dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
1561         else
1562                 dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
1563         pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
1564 }
1565
1566 int ioat1_dma_probe(struct ioatdma_device *device, int dca)
1567 {
1568         struct pci_dev *pdev = device->pdev;
1569         struct dma_device *dma;
1570         int err;
1571
1572         device->intr_quirk = ioat1_intr_quirk;
1573         dma = &device->common;
1574         dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
1575         dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
1576
1577         err = ioat_probe(device);
1578         if (err)
1579                 return err;
1580         ioat_set_tcp_copy_break(4096);
1581         err = ioat_register(device);
1582         if (err)
1583                 return err;
1584         if (dca)
1585                 device->dca = ioat_dca_init(pdev, device->reg_base);
1586
1587         INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
1588         schedule_delayed_work(&device->work, WATCHDOG_DELAY);
1589
1590         return err;
1591 }
1592
1593 int ioat2_dma_probe(struct ioatdma_device *device, int dca)
1594 {
1595         struct pci_dev *pdev = device->pdev;
1596         struct dma_device *dma;
1597         struct dma_chan *chan;
1598         struct ioat_dma_chan *ioat_chan;
1599         int err;
1600
1601         dma = &device->common;
1602         dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
1603         dma->device_issue_pending = ioat2_dma_memcpy_issue_pending;
1604
1605         err = ioat_probe(device);
1606         if (err)
1607                 return err;
1608         ioat_set_tcp_copy_break(2048);
1609
1610         list_for_each_entry(chan, &dma->channels, device_node) {
1611                 ioat_chan = to_ioat_chan(chan);
1612                 writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
1613                        ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
1614         }
1615
1616         err = ioat_register(device);
1617         if (err)
1618                 return err;
1619         if (dca)
1620                 device->dca = ioat2_dca_init(pdev, device->reg_base);
1621
1622         INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
1623         schedule_delayed_work(&device->work, WATCHDOG_DELAY);
1624
1625         return err;
1626 }
1627
1628 int ioat3_dma_probe(struct ioatdma_device *device, int dca)
1629 {
1630         struct pci_dev *pdev = device->pdev;
1631         struct dma_device *dma;
1632         struct dma_chan *chan;
1633         struct ioat_dma_chan *ioat_chan;
1634         int err;
1635         u16 dev_id;
1636
1637         dma = &device->common;
1638         dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
1639         dma->device_issue_pending = ioat2_dma_memcpy_issue_pending;
1640
1641         /* -= IOAT ver.3 workarounds =- */
1642         /* Write CHANERRMSK_INT with 3E07h to mask out the errors
1643          * that can cause stability issues for IOAT ver.3
1644          */
1645         pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
1646
1647         /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
1648          * (workaround for spurious config parity error after restart)
1649          */
1650         pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
1651         if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
1652                 pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
1653
1654         err = ioat_probe(device);
1655         if (err)
1656                 return err;
1657         ioat_set_tcp_copy_break(262144);
1658
1659         list_for_each_entry(chan, &dma->channels, device_node) {
1660                 ioat_chan = to_ioat_chan(chan);
1661                 writel(IOAT_DMA_DCA_ANY_CPU,
1662                        ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
1663         }
1664
1665         err = ioat_register(device);
1666         if (err)
1667                 return err;
1668         if (dca)
1669                 device->dca = ioat3_dca_init(pdev, device->reg_base);
1670
1671         return err;
1672 }
1673
1674 void ioat_dma_remove(struct ioatdma_device *device)
1675 {
1676         struct dma_chan *chan, *_chan;
1677         struct ioat_dma_chan *ioat_chan;
1678         struct dma_device *dma = &device->common;
1679
1680         if (device->version != IOAT_VER_3_0)
1681                 cancel_delayed_work(&device->work);
1682
1683         ioat_disable_interrupts(device);
1684
1685         dma_async_device_unregister(dma);
1686
1687         pci_pool_destroy(device->dma_pool);
1688         pci_pool_destroy(device->completion_pool);
1689
1690         list_for_each_entry_safe(chan, _chan, &dma->channels, device_node) {
1691                 ioat_chan = to_ioat_chan(chan);
1692                 list_del(&chan->device_node);
1693         }
1694 }
1695