 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"
static char *chanerr_str[] = {
	"DMA Transfer Destination Address Error",
	"Next Descriptor Address Error",
	"Descriptor Error",
	"Chan Address Value Error",
	"Channel Command Error",
	"Chipset Uncorrectable Data Integrity Error",
	"DMA Uncorrectable Data Integrity Error",
	"Read Data Error",
	"Write Data Error",
	"Descriptor Control Error",
	"Descriptor Transfer Size Error",
	"Completion Address Error",
	"Interrupt Configuration Error",
	"Super extended descriptor Address Error",
	"Unaffiliated Error",
	"CRC or XOR P Error",
	"XOR Q Error",
	"Descriptor Count Error",
	"DIF All F detect Error",
	"Guard Tag verification Error",
	"Application Tag verification Error",
	"Reference Tag verification Error",
	"Bundle Bit Error",
	"Result DIF All F detect Error",
	"Result Guard Tag verification Error",
	"Result Application Tag verification Error",
	"Result Reference Tag verification Error",
};
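/*
 * chanerr_str[] is indexed by bit position in the CHANERR register;
 * ioat_print_chanerrs() below walks the set bits and prints one line
 * per detected error.
 */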
static void ioat_eh(struct ioatdma_chan *ioat_chan);

static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
		if ((chanerr >> i) & 1) {
			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
				i, chanerr_str[i]);
		}
	}
}
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioatdma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
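/*
 * In MSI-X mode each channel gets its own vector, so the handler below
 * can schedule that channel's cleanup tasklet directly. The shared
 * handler above instead has to read ATTNSTATUS to learn which channels
 * raised the interrupt.
 */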
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioatdma_chan *ioat_chan = data;

	if (test_bit(IOAT_RUN, &ioat_chan->state))
		tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}
void ioat_stop(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	int chan_id = chan_num(ioat_chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &ioat_chan->state);

	/* flush inflight interrupts */
	switch (ioat_dma->irq_mode) {
	case IOAT_MSIX:
		msix = &ioat_dma->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&ioat_chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&ioat_chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
}
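/*
 * __ioat_issue_pending() publishes new descriptors to the hardware by
 * writing the updated descriptor count to the DMACOUNT register;
 * 'issued' records the ring head the hardware has been told about.
 */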
static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
	ioat_chan->issued = ioat_chan->head;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);
}

void ioat_issue_pending(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	if (ioat_ring_pending(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}
}
/**
 * ioat_update_pending - log pending descriptors
 * @ioat_chan: ioat channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with prep_lock held.
 */
static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
		__ioat_issue_pending(ioat_chan);
}
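/*
 * A NULL descriptor transfers no data; it is used to (re)start the
 * descriptor chain at a known address and to generate a completion
 * write, e.g. when bringing a channel up or after error recovery.
 */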
static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat_ring_space(ioat_chan) < 1) {
		dev_err(to_dev(ioat_chan),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
	dump_desc_dbg(ioat_chan, desc);
	/* make sure descriptors are written before we submit */
	wmb();
	ioat_chan->head += 1;
	__ioat_issue_pending(ioat_chan);
}
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		__ioat_start_null_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}
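/*
 * __ioat_restart_chan() rewinds 'issued' to 'tail' and points the
 * hardware chain address at the first unfinished descriptor so that
 * everything after the last completion gets re-executed; if nothing
 * is pending it falls back to a NULL descriptor.
 */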
static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
	/* set the tail to be re-issued */
	ioat_chan->issued = ioat_chan->tail;
	ioat_chan->dmacount = 0;
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);

	if (ioat_ring_pending(ioat_chan)) {
		struct ioat_ring_ent *desc;

		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
		__ioat_issue_pending(ioat_chan);
	} else
		__ioat_start_null_desc(ioat_chan);
}
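/*
 * ioat_quiesce() suspends the channel and waits (bounded by @tmo when
 * non-zero) for it to leave the ACTIVE/IDLE states; ioat_reset_sync()
 * issues a reset and waits for the reset-pending bit to clear.
 */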
static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u64 status;

	status = ioat_chansts(ioat_chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat_chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(ioat_chan);
		cpu_relax();
	}

	return err;
}

static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(ioat_chan);
	while (ioat_reset_pending(ioat_chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}
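/*
 * ioat_tx_submit_unlock() is the dmaengine ->tx_submit() hook. It pairs
 * with ioat_check_space_lock(): prep_lock is taken there (and 'produce'
 * recorded), and released here once 'head' has been advanced past the
 * freshly written descriptors.
 */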
static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
	__releases(&ioat_chan->prep_lock)
{
	struct dma_chan *c = tx->chan;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat_chan->head += ioat_chan->produce;

	ioat_update_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);

	return cookie;
}
static struct ioat_ring_ent *
ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *ioat_dma;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
	int chunk;
	dma_addr_t phys;
	u8 *pos;
	off_t offs;

	ioat_dma = to_ioatdma_device(chan->device);

	chunk = idx / IOAT_DESCS_PER_2M;
	idx &= (IOAT_DESCS_PER_2M - 1);
	offs = idx * IOAT_DESC_SZ;
	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
	phys = ioat_chan->descs[chunk].hw + offs;
	hw = (struct ioat_dma_descriptor *)pos;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat_cache, flags);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	kmem_cache_free(ioat_cache, desc);
}
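/*
 * The descriptor ring is a power-of-2 array of software entries whose
 * hardware descriptors are carved out of 2MB coherent chunks
 * (IOAT_DESCS_PER_2M descriptors per chunk) and linked into a circular
 * chain through hw->next.
 */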
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent **ring;
	int total_descs = 1 << order;
	int i, chunks;

	/* allocate the array to hold the software ring */
	ring = kcalloc(total_descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;

	for (i = 0; i < chunks; i++) {
		struct ioat_descs *descs = &ioat_chan->descs[i];

		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
						 SZ_2M, &descs->hw, flags);
		if (!descs->virt && (i > 0)) {
			int idx;

			for (idx = 0; idx < i; idx++) {
				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
						  ioat_chan->descs[idx].virt,
						  ioat_chan->descs[idx].hw);
				ioat_chan->descs[idx].virt = NULL;
				ioat_chan->descs[idx].hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
	}

	for (i = 0; i < total_descs; i++) {
		ring[i] = ioat_alloc_ring_ent(c, i, flags);
		if (!ring[i]) {
			int idx;

			while (i--)
				ioat_free_ring_ent(ring[i], c);

			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
				dma_free_coherent(to_dev(ioat_chan),
						  SZ_2M,
						  ioat_chan->descs[idx].virt,
						  ioat_chan->descs[idx].hw);
				ioat_chan->descs[idx].virt = NULL;
				ioat_chan->descs[idx].hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs to each other */
	for (i = 0; i < total_descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}
/**
 * ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat_chan: ioat channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
	__acquires(&ioat_chan->prep_lock)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat_chan->head,
			ioat_chan->tail, ioat_chan->issued);
		ioat_chan->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	spin_unlock_bh(&ioat_chan->prep_lock);

	dev_dbg_ratelimited(to_dev(ioat_chan),
			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			    __func__, num_descs, ioat_chan->head,
			    ioat_chan->tail, ioat_chan->issued);

	/* progress reclaim in the allocation failure case; we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (time_is_before_jiffies(ioat_chan->timer.expires)
	    && timer_pending(&ioat_chan->timer)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		ioat_timer_event((unsigned long)ioat_chan);
	}

	return -ENOMEM;
}
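/*
 * XOR and PQ operations with many sources spill into a second,
 * "extended" descriptor that occupies the next ring slot;
 * desc_has_ext() detects this so cleanup can account for the extra slot.
 */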
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

static void
ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(ioat_sed_cache, sed);
}
static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *ioat_chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}

static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
				  u64 *phys_complete)
{
	*phys_complete = ioat_get_current_completion(ioat_chan);
	if (*phys_complete == ioat_chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}
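/*
 * On hardware with descriptor write back error status (IOAT_CAP_DWBES),
 * validate operations report P/Q check failures in the descriptor
 * itself; desc_get_errstat() copies those bits into the client-visible
 * result.
 */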
static void
desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if there's an error written */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}
/**
 * __cleanup - reclaim used descriptors
 * @ioat_chan: channel (ring) to clean
 * @phys_complete: completion writeback address
 */
static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat_chan->tail, i;
	u16 active;

	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's a new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat_ring_active(ioat_chan);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		dump_desc_dbg(ioat_chan, desc);

		/* set err stat if we are using dwbes */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat_chan, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			struct dmaengine_result res;

			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			res.result = DMA_TRANS_NOERROR;
			dmaengine_desc_get_callback_invoke(tx, NULL);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	/* finish all descriptor reads before incrementing tail */
	smp_mb();
	ioat_chan->tail = idx + i;
	/* no active descs have written a completion? */
	BUG_ON(active && !seen_current);
	ioat_chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
			__func__);
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
	}

	/* 5 microsecond delay per pending descriptor */
	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
	       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
}
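/*
 * ioat_cleanup() is the tasklet/timer driven reclaim path: it reaps
 * completed descriptors and, if the channel halted with a handled or
 * recoverable error, kicks ioat_eh() to run the error recovery.
 */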
static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	spin_lock_bh(&ioat_chan->cleanup_lock);

	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	if (is_ioat_halted(*ioat_chan->completion)) {
		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr &
		    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
			ioat_eh(ioat_chan);
		}
	}

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

void ioat_cleanup_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);

	ioat_cleanup(ioat_chan);
	if (!test_bit(IOAT_RUN, &ioat_chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	ioat_quiesce(ioat_chan, 0);
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	__ioat_restart_chan(ioat_chan);
}
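/*
 * ioat_abort_descs() completes every descriptor still outstanding after
 * the failed one with DMA_TRANS_ABORTED so that clients see the failure
 * instead of waiting forever for a completion.
 */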
static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	u16 active;
	int idx = ioat_chan->tail, i;

	/*
	 * We assume that the failed descriptor has been processed.
	 * Now we just return all the remaining submitted descriptors
	 * to abort them.
	 */
	active = ioat_ring_active(ioat_chan);

	/* we skip the failed descriptor that tail points to */
	for (i = 1; i < active; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);

		tx = &desc->txd;
		if (tx->cookie) {
			struct dmaengine_result res;

			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			res.result = DMA_TRANS_ABORTED;
			dmaengine_desc_get_callback_invoke(tx, &res);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			WARN_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat_chan->tail = idx + active;

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
}
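/*
 * ioat_eh() runs once the channel has halted: it classifies the CHANERR
 * bits into expected validate results and recoverable data errors, BUGs
 * on anything unexpected, then clears the error registers and restarts
 * the channel (aborting outstanding descriptors first when required).
 */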
static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
	struct pci_dev *pdev = to_pdev(ioat_chan);
	struct ioat_dma_descriptor *hw;
	struct dma_async_tx_descriptor *tx;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;
	bool abort = false;
	struct dmaengine_result res;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat_chan, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
		if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
			res.result = DMA_TRANS_READ_FAILED;
			err_handled |= IOAT_CHANERR_READ_DATA_ERR;
		} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
			res.result = DMA_TRANS_WRITE_FAILED;
			err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
		}

		abort = true;
	} else
		res.result = DMA_TRANS_NOERROR;

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		dev_err(to_dev(ioat_chan), "Errors handled:\n");
		ioat_print_chanerrs(ioat_chan, err_handled);
		dev_err(to_dev(ioat_chan), "Errors not handled:\n");
		ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));

		BUG();
	}

	/* cleanup the faulty descriptor since we are continuing */
	tx = &desc->txd;
	if (tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}

	/* mark faulting descriptor as complete */
	*ioat_chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat_chan->prep_lock);
	/* we need to abort all descriptors */
	if (abort) {
		ioat_abort_descs(ioat_chan);
		/* clean up the channel, we could be in a weird state */
		ioat_reset_hw(ioat_chan);
	}

	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	ioat_restart_channel(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}
static void check_active(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_active(ioat_chan)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
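/*
 * ioat_timer_event() is the channel watchdog: a halted channel is
 * reported and reset, an empty ring drops back to the idle timeout, and
 * a ring that made no progress since the last acknowledged completion
 * is forcibly aborted, reset and restarted.
 */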
void ioat_timer_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(ioat_chan);

	/* when halted due to errors, check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
			spin_lock_bh(&ioat_chan->cleanup_lock);
			spin_lock_bh(&ioat_chan->prep_lock);
			set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
			spin_unlock_bh(&ioat_chan->prep_lock);

			ioat_abort_descs(ioat_chan);
			dev_warn(to_dev(ioat_chan), "Reset channel...\n");
			ioat_reset_hw(ioat_chan);
			dev_warn(to_dev(ioat_chan), "Restart channel...\n");
			ioat_restart_channel(ioat_chan);

			spin_lock_bh(&ioat_chan->prep_lock);
			clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
			spin_unlock_bh(&ioat_chan->prep_lock);
			spin_unlock_bh(&ioat_chan->cleanup_lock);
		}

		return;
	}

	spin_lock_bh(&ioat_chan->cleanup_lock);

	/* handle the no-actives case */
	if (!ioat_ring_active(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		check_active(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
			status, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
			ioat_ring_active(ioat_chan));

		spin_lock_bh(&ioat_chan->prep_lock);
		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);

		ioat_abort_descs(ioat_chan);
		dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
		ioat_reset_hw(ioat_chan);
		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
		ioat_restart_channel(ioat_chan);

		spin_lock_bh(&ioat_chan->prep_lock);
		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	} else
		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);

	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
	spin_unlock_bh(&ioat_chan->cleanup_lock);
}
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	ioat_cleanup(ioat_chan);

	return dma_cookie_status(c, cookie, txstate);
}
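/*
 * On BWD parts in MSI-X mode, ioat_reset_hw() below saves the MSI-X
 * shadow registers at offsets 0x1000/0x1008/0x1800 before the reset and
 * restores them once the reset has completed.
 */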
int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	if (ioat_dma->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					IOAT_PCI_DMAUNCERRSTS_OFFSET,
					0x10);
		}
	}

	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
	}

	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
	if (!err) {
		if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
			writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
			writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
			writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
		}
	}

	if (err)
		dev_err(&pdev->dev, "Failed to reset: %d\n", err);

	return err;
}