20 ASRC
21 ESAI
22 SSI Dual FIFO (needs firmware ver >= 2)
+ 23 Shared ASRC
The third cell specifies the transfer priority as below.
--- /dev/null
+* Freescale MPC512x and MPC8308 DMA Controller
+
+The DMA controller in Freescale MPC512x and MPC8308 SoCs can move
+blocks of memory contents between memory and peripherals or
+from memory to memory.
+
+Refer to "Generic DMA Controller and DMA request bindings" in
+the dma/dma.txt file for a more detailed description of binding.
+
+Required properties:
+- compatible: should be "fsl,mpc5121-dma" or "fsl,mpc8308-dma";
+- reg: should contain the DMA controller registers location and length;
+- interrupts: should contain the interrupt for the DMA controller; the
+  syntax of the interrupt client node is described in the
+  interrupt-controller/interrupts.txt file.
+- #dma-cells: the length of the DMA specifier, must be <1>.
+  Each channel of this DMA controller has a peripheral request line;
+  the assignment is fixed in hardware. The one cell in the dmas
+  property of a client device represents the channel number.
+
+Example:
+
+ dma0: dma@14000 {
+ compatible = "fsl,mpc5121-dma";
+ reg = <0x14000 0x1800>;
+ interrupts = <65 0x8>;
+ #dma-cells = <1>;
+ };
+
+DMA clients must use the format described in the dma/dma.txt file.
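On the client driver side, such a channel is obtained through the standard
dmaengine helper. A minimal sketch, assuming a platform device whose node
carries matching dmas/dma-names properties (the "rx-tx" name is hypothetical):

	#include <linux/dmaengine.h>

	struct dma_chan *chan;

	chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
	if (!chan)
		return -ENODEV;	/* or defer probing until the DMAC is up */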
--- /dev/null
+* Renesas "Type-AXI" NBPFAXI* DMA controllers
+
+* DMA controller
+
+Required properties
+
+- compatible: must be one of
+ "renesas,nbpfaxi64dmac1b4"
+ "renesas,nbpfaxi64dmac1b8"
+ "renesas,nbpfaxi64dmac1b16"
+ "renesas,nbpfaxi64dmac4b4"
+ "renesas,nbpfaxi64dmac4b8"
+ "renesas,nbpfaxi64dmac4b16"
+ "renesas,nbpfaxi64dmac8b4"
+ "renesas,nbpfaxi64dmac8b8"
+ "renesas,nbpfaxi64dmac8b16"
+- #dma-cells: must be 2: the first cell is the number of the terminal to
+            which the slave is connected, the second cell holds flags.
+            Flags is a bitmask with the following bits defined:
+
+#define NBPF_SLAVE_RQ_HIGH 1
+#define NBPF_SLAVE_RQ_LOW 2
+#define NBPF_SLAVE_RQ_LEVEL 4
+
+Optional properties:
+
+dma-channels and dma-requests, as described in dma.txt, may be given. They are
+not used by the driver, since this information is derived from the compatible
+string.
+
+Example:
+
+ dma: dma-controller@48000000 {
+ compatible = "renesas,nbpfaxi64dmac8b4";
+ reg = <0x48000000 0x400>;
+ interrupts = <0 12 0x4
+ 0 13 0x4
+ 0 14 0x4
+ 0 15 0x4
+ 0 16 0x4
+ 0 17 0x4
+ 0 18 0x4
+ 0 19 0x4>;
+ #dma-cells = <2>;
+ dma-channels = <8>;
+ dma-requests = <8>;
+ };
+
+* DMA client
+
+Required properties:
+
+dmas and dma-names are required, as described in dma.txt.
+
+Example:
+
+#include <dt-bindings/dma/nbpfaxi.h>
+
+...
+ dmas = <&dma 0 (NBPF_SLAVE_RQ_HIGH | NBPF_SLAVE_RQ_LEVEL)
+ &dma 1 (NBPF_SLAVE_RQ_HIGH | NBPF_SLAVE_RQ_LEVEL)>;
+ dma-names = "rx", "tx";
--- /dev/null
+* R-Car Audio DMAC peri peri Device Tree bindings
+
+Required properties:
+- compatible: should be "renesas,rcar-audmapp"
+- #dma-cells: should be <1>, see "dmas" property below
+
+Example:
+	audmapp: audio-dma-pp@ec740000 {
+ compatible = "renesas,rcar-audmapp";
+ #dma-cells = <1>;
+
+ reg = <0 0xec740000 0 0x200>;
+ };
+
+
+* DMA client
+
+Required properties:
+- dmas: a list of <[DMA multiplexer phandle] [SRS/DRS value]> pairs,
+ where SRS/DRS values are fixed handles, specified in the SoC
+ manual as the value that would be written into the PDMACHCR.
+- dma-names: a list of DMA channel names, one per "dmas" entry
+
+Example:
+
+ dmas = <&audmapp 0x2d00
+ &audmapp 0x3700>;
+ dma-names = "src0_ssiu0",
+ "dvc0_ssiu0";
--- /dev/null
+* Renesas R-Car DMA Controller Device Tree bindings
+
+Renesas R-Car Generation 2 SoCs have multiple multi-channel DMA
+controller instances named DMAC capable of serving multiple clients. Channels
+can be dedicated to specific clients or shared between a large number of
+clients.
+
+Each DMA client is connected to one dedicated port of the DMAC, identified by
+an 8-bit port number called the MID/RID. A DMA controller can thus serve up to
+256 clients in total. When the number of hardware channels is lower than the
+number of clients to be served, channels must be shared between multiple DMA
+clients. The association of DMA clients to DMAC channels is fully dynamic and
+not described in these device tree bindings.
+
+Required Properties:
+
+- compatible: must contain "renesas,rcar-dmac"
+
+- reg: base address and length of the registers block for the DMAC
+
+- interrupts: interrupt specifiers for the DMAC, one for each entry in
+ interrupt-names.
+- interrupt-names: one entry for the error interrupt, named "error", plus one
+  entry per channel, named "ch%u", where %u is the channel number ranging
+  from zero to the number of channels minus one.
+
+- clocks: a list of phandle + clock-specifier pairs, one for each entry
+  in clock-names.
+- clock-names: must contain "fck" for the functional clock.
+
+- #dma-cells: must be <1>, the cell specifies the MID/RID of the DMAC port
+ connected to the DMA client
+- dma-channels: number of DMA channels
+
+Example: R8A7790 (R-Car H2) SYS-DMACs
+
+ dmac0: dma-controller@e6700000 {
+ compatible = "renesas,rcar-dmac";
+ reg = <0 0xe6700000 0 0x20000>;
+ interrupts = <0 197 IRQ_TYPE_LEVEL_HIGH
+ 0 200 IRQ_TYPE_LEVEL_HIGH
+ 0 201 IRQ_TYPE_LEVEL_HIGH
+ 0 202 IRQ_TYPE_LEVEL_HIGH
+ 0 203 IRQ_TYPE_LEVEL_HIGH
+ 0 204 IRQ_TYPE_LEVEL_HIGH
+ 0 205 IRQ_TYPE_LEVEL_HIGH
+ 0 206 IRQ_TYPE_LEVEL_HIGH
+ 0 207 IRQ_TYPE_LEVEL_HIGH
+ 0 208 IRQ_TYPE_LEVEL_HIGH
+ 0 209 IRQ_TYPE_LEVEL_HIGH
+ 0 210 IRQ_TYPE_LEVEL_HIGH
+ 0 211 IRQ_TYPE_LEVEL_HIGH
+ 0 212 IRQ_TYPE_LEVEL_HIGH
+ 0 213 IRQ_TYPE_LEVEL_HIGH
+ 0 214 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14";
+ clocks = <&mstp2_clks R8A7790_CLK_SYS_DMAC0>;
+ clock-names = "fck";
+ #dma-cells = <1>;
+ dma-channels = <15>;
+ };
+
+ dmac1: dma-controller@e6720000 {
+ compatible = "renesas,rcar-dmac";
+ reg = <0 0xe6720000 0 0x20000>;
+ interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH
+ 0 216 IRQ_TYPE_LEVEL_HIGH
+ 0 217 IRQ_TYPE_LEVEL_HIGH
+ 0 218 IRQ_TYPE_LEVEL_HIGH
+ 0 219 IRQ_TYPE_LEVEL_HIGH
+ 0 308 IRQ_TYPE_LEVEL_HIGH
+ 0 309 IRQ_TYPE_LEVEL_HIGH
+ 0 310 IRQ_TYPE_LEVEL_HIGH
+ 0 311 IRQ_TYPE_LEVEL_HIGH
+ 0 312 IRQ_TYPE_LEVEL_HIGH
+ 0 313 IRQ_TYPE_LEVEL_HIGH
+ 0 314 IRQ_TYPE_LEVEL_HIGH
+ 0 315 IRQ_TYPE_LEVEL_HIGH
+ 0 316 IRQ_TYPE_LEVEL_HIGH
+ 0 317 IRQ_TYPE_LEVEL_HIGH
+ 0 318 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14";
+ clocks = <&mstp2_clks R8A7790_CLK_SYS_DMAC1>;
+ clock-names = "fck";
+ #dma-cells = <1>;
+ dma-channels = <15>;
+ };
Each dmas request consists of 4 cells:
1. A phandle pointing to the DMA controller
- 2. Device Type
+ 2. Device signal number, the signal line for single and burst requests
+ connected from the device to the DMA40 engine
3. The DMA request line number (only when 'use fixed channel' is set)
- 4. A 32bit mask specifying; mode, direction and endianness [NB: This list will grow]
+ 4. A 32bit mask specifying; mode, direction and endianness
+ [NB: This list will grow]
0x00000001: Mode:
Logical channel when unset
Physical channel when set
	0x00000010: Set channel as high priority:
		Normal priority when unset
		High priority when set
+Existing signal numbers for the DB8500 ASIC. Unless specified, the signals are
+bidirectional, i.e. the same for RX and TX operations:
+
+0: SPI controller 0
+1: SD/MMC controller 0 (unused)
+2: SD/MMC controller 1 (unused)
+3: SD/MMC controller 2 (unused)
+4: I2C port 1
+5: I2C port 3
+6: I2C port 2
+7: I2C port 4
+8: Synchronous Serial Port SSP0
+9: Synchronous Serial Port SSP1
+10: Multi-Channel Display Engine MCDE RX
+11: UART port 2
+12: UART port 1
+13: UART port 0
+14: Multirate Serial Port MSP2
+15: I2C port 0
+16: USB OTG in/out endpoints 7 & 15
+17: USB OTG in/out endpoints 6 & 14
+18: USB OTG in/out endpoints 5 & 13
+19: USB OTG in/out endpoints 4 & 12
+20: SLIMbus or HSI channel 0
+21: SLIMbus or HSI channel 1
+22: SLIMbus or HSI channel 2
+23: SLIMbus or HSI channel 3
+24: Multimedia DSP SXA0
+25: Multimedia DSP SXA1
+26: Multimedia DSP SXA2
+27: Multimedia DSP SXA3
+28: SD/MM controller 2
+29: SD/MM controller 0
+30: MSP port 1 on DB8500 v1, MSP port 3 on DB8500 v2
+31: MSP port 0 or SLIMbus channel 0
+32: SD/MM controller 1
+33: SPI controller 2
+34: i2c3 RX2 TX2
+35: SPI controller 1
+36: USB OTG in/out endpoints 3 & 11
+37: USB OTG in/out endpoints 2 & 10
+38: USB OTG in/out endpoints 1 & 9
+39: USB OTG in/out endpoint 8
+40: SPI controller 3
+41: SD/MM controller 3
+42: SD/MM controller 4
+43: SD/MM controller 5
+44: Multimedia DSP SXA4
+45: Multimedia DSP SXA5
+46: SLIMbus channel 8 or Multimedia DSP SXA6
+47: SLIMbus channel 9 or Multimedia DSP SXA7
+48: Crypto Accelerator 1
+49: Crypto Accelerator 1 TX or Hash Accelerator 1 TX
+50: Hash Accelerator 1 TX
+51: memcpy TX (to be used by the DMA driver for memcpy operations)
+52: SLIMbus or HSI channel 4
+53: SLIMbus or HSI channel 5
+54: SLIMbus or HSI channel 6
+55: SLIMbus or HSI channel 7
+56: memcpy (to be used by the DMA driver for memcpy operations)
+57: memcpy (to be used by the DMA driver for memcpy operations)
+58: memcpy (to be used by the DMA driver for memcpy operations)
+59: memcpy (to be used by the DMA driver for memcpy operations)
+60: memcpy (to be used by the DMA driver for memcpy operations)
+61: Crypto Accelerator 0
+62: Crypto Accelerator 0 TX or Hash Accelerator 0 TX
+63: Hash Accelerator 0 TX
+
Example:
uart@80120000 {
--- /dev/null
+Allwinner A31 DMA Controller
+
+This driver follows the generic DMA bindings defined in dma.txt.
+
+Required properties:
+
+- compatible: Must be "allwinner,sun6i-a31-dma"
+- reg: Should contain the registers base address and length
+- interrupts: Should contain a reference to the interrupt used by this device
+- clocks: Should contain a reference to the parent AHB clock
+- resets: Should contain a reference to the reset controller asserting
+ this device in reset
+- #dma-cells: Should be 1, a single cell holding a line request number
+
+Example:
+ dma: dma-controller@01c02000 {
+ compatible = "allwinner,sun6i-a31-dma";
+ reg = <0x01c02000 0x1000>;
+ interrupts = <0 50 4>;
+ clocks = <&ahb1_gates 6>;
+ resets = <&ahb1_rst 6>;
+ #dma-cells = <1>;
+ };
+
+Clients:
+
+DMA clients connected to the A31 DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each
+channel: a phandle plus one integer cell.
+The two cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. The port ID as specified in the datasheet
+
+Example:
+spi2: spi@01c6a000 {
+ compatible = "allwinner,sun6i-a31-spi";
+ reg = <0x01c6a000 0x1000>;
+ interrupts = <0 67 4>;
+ clocks = <&ahb1_gates 22>, <&spi2_clk>;
+ clock-names = "ahb", "mod";
+ dmas = <&dma 25>, <&dma 25>;
+ dma-names = "rx", "tx";
+ resets = <&ahb1_rst 22>;
+};
the given transaction.
Interface:
- struct dma_async_tx_descriptor *(*chan->device->device_prep_slave_sg)(
+ struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags);
- struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_cyclic)(
+ struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_data_direction direction);
- struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+ struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
struct dma_chan *chan, struct dma_interleaved_template *xt,
unsigned long flags);
The peripheral driver is expected to have mapped the scatterlist for
the DMA operation prior to calling device_prep_slave_sg, and must
keep the scatterlist mapped until the DMA operation has completed.
- The scatterlist must be mapped using the DMA struct device. So,
- normal setup should look like this:
+ The scatterlist must be mapped using the DMA struct device.
+ If a mapping needs to be synchronized later, dma_sync_*_for_*() must be
+ called using the DMA struct device, too.
+ So, normal setup should look like this:
nr_sg = dma_map_sg(chan->device->dev, sgl, sg_len);
if (nr_sg == 0)
/* error */
- desc = chan->device->device_prep_slave_sg(chan, sgl, nr_sg,
- direction, flags);
+ desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, direction, flags);
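   If the CPU must touch the buffers between transfers, a minimal sketch of
   the synchronization mentioned above (same device and scatterlist as in
   the mapping):

	dma_sync_sg_for_cpu(chan->device->dev, sgl, sg_len, direction);
	/* ... CPU reads or updates the buffers ... */
	dma_sync_sg_for_device(chan->device->dev, sgl, sg_len, direction);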
Once a descriptor has been obtained, the callback information can be
added and the descriptor must then be submitted. Some DMA engine
description of this API.
This can be used in conjunction with dma_async_is_complete() and
- the cookie returned from 'descriptor->submit()' to check for
+ the cookie returned from dmaengine_submit() to check for
completion of a specific DMA transaction.
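   A minimal polling sketch of that pattern (variable declarations and
   error handling omitted):

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	...
	status = dma_async_is_tx_complete(chan, cookie, &last, &used);
	/* equivalently, once the two cookies are known: */
	status = dma_async_is_complete(cookie, last, used);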
Note:
}
EXPORT_SYMBOL(edma_clear_event);
+/*
+ * edma_assign_channel_eventq - move given channel to desired eventq
+ * Arguments:
+ * channel - channel number
+ * eventq_no - event queue to move the channel to
+ *
+ * Can be used to move a channel to a selected event queue.
+ */
+void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no)
+{
+ unsigned ctlr;
+
+ ctlr = EDMA_CTLR(channel);
+ channel = EDMA_CHAN_SLOT(channel);
+
+ if (channel >= edma_cc[ctlr]->num_channels)
+ return;
+
+ /* default to low priority queue */
+ if (eventq_no == EVENTQ_DEFAULT)
+ eventq_no = edma_cc[ctlr]->default_queue;
+ if (eventq_no >= edma_cc[ctlr]->num_tc)
+ return;
+
+ map_dmach_queue(ctlr, channel, eventq_no);
+}
+EXPORT_SYMBOL(edma_assign_channel_eventq);
+
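A short usage sketch (the controller/channel numbers are hypothetical); the
EDMA cyclic-prep change further below uses this call to pin audio channels
to EVENTQ_0:

	/* move controller 0, channel 12 to the highest-priority event queue */
	edma_assign_channel_eventq(EDMA_CTLR_CHAN(0, 12), EVENTQ_0);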
static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
struct edma *edma_cc)
{
queue_priority_map[i][1] = -1;
pdata->queue_priority_mapping = queue_priority_map;
- pdata->default_queue = 0;
+ /* Default queue has the lowest priority */
+ pdata->default_queue = i - 1;
return 0;
}
compatible = "fsl,mpc5121-dma";
reg = <0x14000 0x1800>;
interrupts = <65 0x8>;
+ #dma-cells = <1>;
};
};
* Define the default configuration for dual address memory-memory transfer.
 * The RS_AUTO value (0x400) represents auto-request, external->external.
*/
-#define RS_DUAL (DM_INC | SM_INC | 0x400 | TS_INDEX2VAL(XMIT_SZ_32BIT))
+#define RS_DUAL (DM_INC | SM_INC | RS_AUTO | TS_INDEX2VAL(XMIT_SZ_32BIT))
static unsigned long dma_find_base(unsigned int chan)
{
#ifndef DMA_REGISTER_H
#define DMA_REGISTER_H
-/* DMA register */
-#define SAR 0x00
-#define DAR 0x04
-#define TCR 0x08
-#define CHCR 0x0C
-#define DMAOR 0x40
+/* DMA registers */
+#define SAR 0x00 /* Source Address Register */
+#define DAR 0x04 /* Destination Address Register */
+#define TCR 0x08 /* Transfer Count Register */
+#define CHCR 0x0C /* Channel Control Register */
+#define DMAOR 0x40 /* DMA Operation Register */
/* DMAOR definitions */
-#define DMAOR_AE 0x00000004
+#define DMAOR_AE 0x00000004 /* Address Error Flag */
#define DMAOR_NMIF 0x00000002
-#define DMAOR_DME 0x00000001
+#define DMAOR_DME 0x00000001 /* DMA Master Enable */
/* Definitions for the SuperH DMAC */
#define REQ_L 0x00000000
#define ACK_W 0x00020000
#define ACK_H 0x00000000
#define ACK_L 0x00010000
-#define DM_INC 0x00004000
-#define DM_DEC 0x00008000
-#define DM_FIX 0x0000c000
-#define SM_INC 0x00001000
-#define SM_DEC 0x00002000
-#define SM_FIX 0x00003000
+#define DM_INC 0x00004000 /* Destination addresses are incremented */
+#define DM_DEC 0x00008000 /* Destination addresses are decremented */
+#define DM_FIX 0x0000c000 /* Destination address is fixed */
+#define SM_INC 0x00001000 /* Source addresses are incremented */
+#define SM_DEC 0x00002000 /* Source addresses are decremented */
+#define SM_FIX 0x00003000 /* Source address is fixed */
#define RS_IN 0x00000200
#define RS_OUT 0x00000300
+#define RS_AUTO 0x00000400 /* Auto Request */
+#define RS_ERS 0x00000800 /* DMA extended resource selector */
#define TS_BLK 0x00000040
#define TM_BUR 0x00000020
-#define CHCR_DE 0x00000001
-#define CHCR_TE 0x00000002
-#define CHCR_IE 0x00000004
+#define CHCR_DE 0x00000001 /* DMA Enable */
+#define CHCR_TE 0x00000002 /* Transfer End Flag */
+#define CHCR_IE 0x00000004 /* Interrupt Enable */
#endif
{
.slave_id = SHDMA_SLAVE_SCIF0_TX,
.addr = 0xffe0000c,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x21,
}, {
.slave_id = SHDMA_SLAVE_SCIF0_RX,
.addr = 0xffe00014,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x22,
}, {
.slave_id = SHDMA_SLAVE_SCIF1_TX,
.addr = 0xffe1000c,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x25,
}, {
.slave_id = SHDMA_SLAVE_SCIF1_RX,
.addr = 0xffe10014,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x26,
}, {
.slave_id = SHDMA_SLAVE_SCIF2_TX,
.addr = 0xffe2000c,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x29,
}, {
.slave_id = SHDMA_SLAVE_SCIF2_RX,
.addr = 0xffe20014,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x2a,
}, {
.slave_id = SHDMA_SLAVE_SIUA_TX,
.addr = 0xa454c098,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xb1,
}, {
.slave_id = SHDMA_SLAVE_SIUA_RX,
.addr = 0xa454c090,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xb2,
}, {
.slave_id = SHDMA_SLAVE_SIUB_TX,
.addr = 0xa454c09c,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xb5,
}, {
.slave_id = SHDMA_SLAVE_SIUB_RX,
.addr = 0xa454c094,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xb6,
}, {
.slave_id = SHDMA_SLAVE_SDHI0_TX,
.addr = 0x04ce0030,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc1,
}, {
.slave_id = SHDMA_SLAVE_SDHI0_RX,
.addr = 0x04ce0030,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc2,
},
};
{
.slave_id = SHDMA_SLAVE_SCIF0_TX,
.addr = 0xffe0000c,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x21,
}, {
.slave_id = SHDMA_SLAVE_SCIF0_RX,
.addr = 0xffe00014,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x22,
}, {
.slave_id = SHDMA_SLAVE_SCIF1_TX,
.addr = 0xffe1000c,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x25,
}, {
.slave_id = SHDMA_SLAVE_SCIF1_RX,
.addr = 0xffe10014,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x26,
}, {
.slave_id = SHDMA_SLAVE_SCIF2_TX,
.addr = 0xffe2000c,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x29,
}, {
.slave_id = SHDMA_SLAVE_SCIF2_RX,
.addr = 0xffe20014,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x2a,
}, {
.slave_id = SHDMA_SLAVE_SCIF3_TX,
.addr = 0xa4e30020,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x2d,
}, {
.slave_id = SHDMA_SLAVE_SCIF3_RX,
.addr = 0xa4e30024,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x2e,
}, {
.slave_id = SHDMA_SLAVE_SCIF4_TX,
.addr = 0xa4e40020,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x31,
}, {
.slave_id = SHDMA_SLAVE_SCIF4_RX,
.addr = 0xa4e40024,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x32,
}, {
.slave_id = SHDMA_SLAVE_SCIF5_TX,
.addr = 0xa4e50020,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x35,
}, {
.slave_id = SHDMA_SLAVE_SCIF5_RX,
.addr = 0xa4e50024,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x36,
}, {
.slave_id = SHDMA_SLAVE_USB0D0_TX,
.addr = 0xA4D80100,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0x73,
}, {
.slave_id = SHDMA_SLAVE_USB0D0_RX,
.addr = 0xA4D80100,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0x73,
}, {
.slave_id = SHDMA_SLAVE_USB0D1_TX,
.addr = 0xA4D80120,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0x77,
}, {
.slave_id = SHDMA_SLAVE_USB0D1_RX,
.addr = 0xA4D80120,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0x77,
}, {
.slave_id = SHDMA_SLAVE_USB1D0_TX,
.addr = 0xA4D90100,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xab,
}, {
.slave_id = SHDMA_SLAVE_USB1D0_RX,
.addr = 0xA4D90100,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xab,
}, {
.slave_id = SHDMA_SLAVE_USB1D1_TX,
.addr = 0xA4D90120,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xaf,
}, {
.slave_id = SHDMA_SLAVE_USB1D1_RX,
.addr = 0xA4D90120,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xaf,
}, {
.slave_id = SHDMA_SLAVE_SDHI0_TX,
.addr = 0x04ce0030,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc1,
}, {
.slave_id = SHDMA_SLAVE_SDHI0_RX,
.addr = 0x04ce0030,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc2,
}, {
.slave_id = SHDMA_SLAVE_SDHI1_TX,
.addr = 0x04cf0030,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc9,
}, {
.slave_id = SHDMA_SLAVE_SDHI1_RX,
.addr = 0x04cf0030,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xca,
},
};
{
.slave_id = SHDMA_SLAVE_SDHI_TX,
.addr = 0x1fe50030,
- .chcr = SM_INC | 0x800 | 0x40000000 |
+ .chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc5,
},
{
.slave_id = SHDMA_SLAVE_SDHI_RX,
.addr = 0x1fe50030,
- .chcr = DM_INC | 0x800 | 0x40000000 |
+ .chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc6,
},
{
.slave_id = SHDMA_SLAVE_MMCIF_TX,
.addr = 0x1fcb0034,
- .chcr = SM_INC | 0x800 | 0x40000000 |
+ .chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xd3,
},
{
.slave_id = SHDMA_SLAVE_MMCIF_RX,
.addr = 0x1fcb0034,
- .chcr = DM_INC | 0x800 | 0x40000000 |
+ .chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xd7,
},
{
.slave_id = SHDMA_SLAVE_SCIF2_TX,
.addr = 0x1f4b000c,
- .chcr = SM_INC | 0x800 | 0x40000000 |
+ .chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x21,
},
{
.slave_id = SHDMA_SLAVE_SCIF2_RX,
.addr = 0x1f4b0014,
- .chcr = DM_INC | 0x800 | 0x40000000 |
+ .chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x22,
},
{
.slave_id = SHDMA_SLAVE_SCIF3_TX,
.addr = 0x1f4c000c,
- .chcr = SM_INC | 0x800 | 0x40000000 |
+ .chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x29,
},
{
.slave_id = SHDMA_SLAVE_SCIF3_RX,
.addr = 0x1f4c0014,
- .chcr = DM_INC | 0x800 | 0x40000000 |
+ .chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x2a,
},
{
.slave_id = SHDMA_SLAVE_SCIF4_TX,
.addr = 0x1f4d000c,
- .chcr = SM_INC | 0x800 | 0x40000000 |
+ .chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x41,
},
{
.slave_id = SHDMA_SLAVE_SCIF4_RX,
.addr = 0x1f4d0014,
- .chcr = DM_INC | 0x800 | 0x40000000 |
+ .chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x42,
},
{
.slave_id = SHDMA_SLAVE_RSPI_TX,
.addr = 0xfe480004,
- .chcr = SM_INC | 0x800 | 0x40000000 |
+ .chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc1,
},
{
.slave_id = SHDMA_SLAVE_RSPI_RX,
.addr = 0xfe480004,
- .chcr = DM_INC | 0x800 | 0x40000000 |
+ .chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_16BIT),
.mid_rid = 0xc2,
},
{
.slave_id = SHDMA_SLAVE_RIIC0_TX,
.addr = 0x1e500012,
- .chcr = SM_INC | 0x800 | 0x40000000 |
+ .chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x21,
},
{
.slave_id = SHDMA_SLAVE_RIIC0_RX,
.addr = 0x1e500013,
- .chcr = DM_INC | 0x800 | 0x40000000 |
+ .chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x22,
},
{
.slave_id = SHDMA_SLAVE_RIIC1_TX,
.addr = 0x1e510012,
- .chcr = SM_INC | 0x800 | 0x40000000 |
+ .chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x29,
},
{
.slave_id = SHDMA_SLAVE_RIIC1_RX,
.addr = 0x1e510013,
- .chcr = DM_INC | 0x800 | 0x40000000 |
+ .chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x2a,
},
{
.slave_id = SHDMA_SLAVE_RIIC2_TX,
.addr = 0x1e520012,
- .chcr = SM_INC | 0x800 | 0x40000000 |
+ .chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0xa1,
},
{
.slave_id = SHDMA_SLAVE_RIIC2_RX,
.addr = 0x1e520013,
- .chcr = DM_INC | 0x800 | 0x40000000 |
+ .chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0xa2,
},
{
.slave_id = SHDMA_SLAVE_RIIC3_TX,
.addr = 0x1e530012,
- .chcr = SM_INC | 0x800 | 0x40000000 |
+ .chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0xa9,
},
{
.slave_id = SHDMA_SLAVE_RIIC3_RX,
.addr = 0x1e530013,
- .chcr = DM_INC | 0x800 | 0x40000000 |
+ .chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0xaf,
},
{
.slave_id = SHDMA_SLAVE_RIIC4_TX,
.addr = 0x1e540012,
- .chcr = SM_INC | 0x800 | 0x40000000 |
+ .chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0xc5,
},
{
.slave_id = SHDMA_SLAVE_RIIC4_RX,
.addr = 0x1e540013,
- .chcr = DM_INC | 0x800 | 0x40000000 |
+ .chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0xc6,
},
{
.slave_id = SHDMA_SLAVE_RIIC5_TX,
.addr = 0x1e550012,
- .chcr = SM_INC | 0x800 | 0x40000000 |
+ .chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x21,
},
{
.slave_id = SHDMA_SLAVE_RIIC5_RX,
.addr = 0x1e550013,
- .chcr = DM_INC | 0x800 | 0x40000000 |
+ .chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x22,
},
{
.slave_id = SHDMA_SLAVE_RIIC6_TX,
.addr = 0x1e560012,
- .chcr = SM_INC | 0x800 | 0x40000000 |
+ .chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x29,
},
{
.slave_id = SHDMA_SLAVE_RIIC6_RX,
.addr = 0x1e560013,
- .chcr = DM_INC | 0x800 | 0x40000000 |
+ .chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x2a,
},
{
.slave_id = SHDMA_SLAVE_RIIC7_TX,
.addr = 0x1e570012,
- .chcr = SM_INC | 0x800 | 0x40000000 |
+ .chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x41,
},
{
.slave_id = SHDMA_SLAVE_RIIC7_RX,
.addr = 0x1e570013,
- .chcr = DM_INC | 0x800 | 0x40000000 |
+ .chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x42,
},
{
.slave_id = SHDMA_SLAVE_RIIC8_TX,
.addr = 0x1e580012,
- .chcr = SM_INC | 0x800 | 0x40000000 |
+ .chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x45,
},
{
.slave_id = SHDMA_SLAVE_RIIC8_RX,
.addr = 0x1e580013,
- .chcr = DM_INC | 0x800 | 0x40000000 |
+ .chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x46,
},
{
.slave_id = SHDMA_SLAVE_RIIC9_TX,
.addr = 0x1e590012,
- .chcr = SM_INC | 0x800 | 0x40000000 |
+ .chcr = SM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x51,
},
{
.slave_id = SHDMA_SLAVE_RIIC9_RX,
.addr = 0x1e590013,
- .chcr = DM_INC | 0x800 | 0x40000000 |
+ .chcr = DM_INC | RS_ERS | 0x40000000 |
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x52,
},
channels, Memory Mapped to Stream (MM2S) and Stream to
Memory Mapped (S2MM) for the data transfers.
+config DMA_SUN6I
+ tristate "Allwinner A31 SoCs DMA support"
+ depends on MACH_SUN6I || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support for the DMA engine for Allwinner A31 SoCs.
+
+config NBPFAXI_DMA
+ tristate "Renesas Type-AXI NBPF DMA support"
+ select DMA_ENGINE
+ depends on ARM || COMPILE_TEST
+ help
+ Support for "Type-AXI" NBPF DMA IPs from Renesas
+
config DMA_ENGINE
bool
config DMA_OF
def_bool y
depends on OF
+ select DMA_ENGINE
comment "DMA Clients"
depends on DMA_ENGINE
-ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
-ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
+subdir-ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
+subdir-ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
obj-y += xilinx/
obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
+obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
+obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
- imx-dma
- imx-sdma
- mxs-dma.c
- - dw_dmac
- intel_mid_dma
4. Check other subsystems for dma drivers and merge/move to dmaengine
5. Remove dma_slave_config's dma direction.
if (early_bytes) {
dev_vdbg(&pl08x->adev->dev,
- "%s byte width LLIs (remain 0x%08x)\n",
+ "%s byte width LLIs (remain 0x%08zx)\n",
__func__, bd.remainder);
prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
num_llis++, &total_bytes);
static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
dma_addr_t slave_addr;
dev_dbg(&pl08x->adev->dev,
- "%s prepare cyclic transaction of %d/%d bytes %s %s\n",
+ "%s prepare cyclic transaction of %zd/%zd bytes %s %s\n",
__func__, period_len, buf_len,
direction == DMA_MEM_TO_DEV ? "to" : "from",
plchan->name);
ret = -EINVAL;
goto out;
}
- atchan->remain_desc -= (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
- << (desc_first->tx_width);
- if (atchan->remain_desc < 0) {
+
+ count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
+ << desc_first->tx_width;
+ if (atchan->remain_desc < count) {
ret = -EINVAL;
goto out;
- } else {
- ret = atchan->remain_desc;
}
+
+ atchan->remain_desc -= count;
+ ret = atchan->remain_desc;
} else {
/*
* Get residual bytes when current
* @period_len: number of bytes for each period
* @direction: transfer direction, to or from device
* @flags: tx descriptor status flags
- * @context: transfer context (ignored)
*/
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma_slave *atslave = chan->private;
static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
enum dma_slave_buswidth dev_width;
static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
struct jz4740_dma_desc *desc;
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("JZ4740 DMA driver");
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
channel_set_bit(dw, CH_EN, dwc->mask);
}
+static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
+{
+ struct dw_desc *desc;
+
+ if (list_empty(&dwc->queue))
+ return;
+
+ list_move(dwc->queue.next, &dwc->active_list);
+ desc = dwc_first_active(dwc);
+ dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
+ dwc_dostart(dwc, desc);
+}
+
/*----------------------------------------------------------------------*/
static void
* the completed ones.
*/
list_splice_init(&dwc->active_list, &list);
- if (!list_empty(&dwc->queue)) {
- list_move(dwc->queue.next, &dwc->active_list);
- dwc_dostart(dwc, dwc_first_active(dwc));
- }
+ dwc_dostart_first_queued(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
/* Try to continue after resetting the channel... */
dwc_chan_disable(dw, dwc);
- if (!list_empty(&dwc->queue)) {
- list_move(dwc->queue.next, &dwc->active_list);
- dwc_dostart(dwc, dwc_first_active(dwc));
- }
+ dwc_dostart_first_queued(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
}
* possible, perhaps even appending to those already submitted
* for DMA. But this is hard to do in a race-free manner.
*/
- if (list_empty(&dwc->active_list)) {
- dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
- desc->txd.cookie);
- list_add_tail(&desc->desc_node, &dwc->active_list);
- dwc_dostart(dwc, dwc_first_active(dwc));
- } else {
- dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
- desc->txd.cookie);
- list_add_tail(&desc->desc_node, &dwc->queue);
- }
+ dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie);
+ list_add_tail(&desc->desc_node, &dwc->queue);
spin_unlock_irqrestore(&dwc->lock, flags);
static void dwc_issue_pending(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ unsigned long flags;
- if (!list_empty(&dwc->queue))
- dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+ spin_lock_irqsave(&dwc->lock, flags);
+ if (list_empty(&dwc->active_list))
+ dwc_dostart_first_queued(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
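After this change a submitted descriptor is only started from issue_pending,
as the dmaengine API documents. Client-side sketch of the required sequence:

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);	/* nothing starts without this */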
static int dwc_alloc_chan_resources(struct dma_chan *chan)
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/of.h>
#include <linux/platform_data/edma.h>
* echan->edesc is NULL and exit.)
*/
if (echan->edesc) {
+ int cyclic = echan->edesc->cyclic;
echan->edesc = NULL;
edma_stop(echan->ch_num);
+ /* Move the cyclic channel back to default queue */
+ if (cyclic)
+ edma_assign_channel_eventq(echan->ch_num,
+ EVENTQ_DEFAULT);
}
vchan_get_all_descriptors(&echan->vchan, &head);
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long tx_flags, void *context)
+ unsigned long tx_flags)
{
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
edesc->absync = ret;
/*
- * Enable interrupts for every period because callback
- * has to be called for every period.
+ * Enable period interrupt only if it is requested
*/
- edesc->pset[i].param.opt |= TCINTEN;
+ if (tx_flags & DMA_PREP_INTERRUPT)
+ edesc->pset[i].param.opt |= TCINTEN;
}
+ /* Place the cyclic channel to highest priority queue */
+ edma_assign_channel_eventq(echan->ch_num, EVENTQ_0);
+
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
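With this change a client only gets per-period interrupts when it asks for
them. A client-side sketch, with hypothetical callback and context names:

	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (desc) {
		desc->callback = my_period_cb;	/* hypothetical */
		desc->callback_param = my_ctx;
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}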
caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
caps->cmd_pause = true;
caps->cmd_terminate = true;
- caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
return 0;
}
ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
if (ecc->dummy_slot < 0) {
dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
- return -EIO;
+ return ecc->dummy_slot;
}
dma_cap_zero(ecc->dma_slave.cap_mask);
}
}
- if (EDMA_CTLRS == 2) {
+ if (!of_have_populated_dt() && EDMA_CTLRS == 2) {
pdev1 = platform_device_register_full(&edma_dev_info1);
if (IS_ERR(pdev1)) {
platform_driver_unregister(&edma_driver);
* @period_len: length of a single period
* @dir: direction of the operation
* @flags: tx descriptor status flags
- * @context: operation context (ignored)
*
* Prepares a descriptor for cyclic DMA operation. This means that once the
* descriptor is submitted, we will be submitting in a @period_len sized
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_transfer_direction dir, unsigned long flags,
- void *context)
+ enum dma_transfer_direction dir, unsigned long flags)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
struct ep93xx_dma_desc *desc, *first;
unsigned int slot, bool enable)
{
u32 ch = fsl_chan->vchan.chan.chan_id;
- void __iomem *muxaddr = fsl_chan->edma->muxbase[ch / DMAMUX_NR];
+ void __iomem *muxaddr;
unsigned chans_per_mux, ch_off;
chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
+ muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
if (enable)
edma_writeb(fsl_chan->edma,
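The index math this hunk corrects, worked through under the assumption of
the usual Vybrid configuration (n_chans = 32, DMAMUX_NR = 2, hence
chans_per_mux = 16):

	/*
	 * channel id 17:
	 *   old: muxbase[17 / DMAMUX_NR]     -> muxbase[8], past the 2-entry array
	 *   new: muxbase[17 / chans_per_mux] -> muxbase[1], the second DMAMUX
	 *   ch_off = 17 % chans_per_mux = 1, the slot within that mux
	 */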
static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct fsl_edma_desc *fsl_desc;
{
struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
struct dma_chan *chan, *_chan;
+ unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR;
if (dma_spec->args_count != 2)
return NULL;
list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
if (chan->client_count)
continue;
- if ((chan->chan_id / DMAMUX_NR) == dma_spec->args[0]) {
+ if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
chan = dma_get_slave_channel(chan);
if (chan) {
chan->device->privatecnt++;
struct fsldma_chan *chan = to_fsl_chan(tx->chan);
struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
struct fsl_desc_sw *child;
- unsigned long flags;
dma_cookie_t cookie = -EINVAL;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
+
+#ifdef CONFIG_PM
+ if (unlikely(chan->pm_state != RUNNING)) {
+ chan_dbg(chan, "cannot submit due to suspend\n");
+ spin_unlock_bh(&chan->desc_lock);
+ return -1;
+ }
+#endif
/*
* assign cookies to all of the software descriptors
/* put this transaction onto the tail of the pending queue */
append_ld_queue(chan, desc);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
return cookie;
}
return desc;
}
+/**
+ * fsldma_clean_completed_descriptor - free all descriptors which
+ * have been completed and acked
+ * @chan: Freescale DMA channel
+ *
+ * This function is used on all completed and acked descriptors.
+ * All descriptors should only be freed in this function.
+ */
+static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan)
+{
+ struct fsl_desc_sw *desc, *_desc;
+
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node)
+ if (async_tx_test_ack(&desc->async_tx))
+ fsl_dma_free_descriptor(chan, desc);
+}
+
+/**
+ * fsldma_run_tx_complete_actions - cleanup a single link descriptor
+ * @chan: Freescale DMA channel
+ * @desc: descriptor to cleanup and free
+ * @cookie: Freescale DMA transaction identifier
+ *
+ * This function is used on a descriptor which has been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies.
+ */
+static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc, dma_cookie_t cookie)
+{
+ struct dma_async_tx_descriptor *txd = &desc->async_tx;
+ dma_cookie_t ret = cookie;
+
+ BUG_ON(txd->cookie < 0);
+
+ if (txd->cookie > 0) {
+ ret = txd->cookie;
+
+ /* Run the link descriptor callback function */
+ if (txd->callback) {
+ chan_dbg(chan, "LD %p callback\n", desc);
+ txd->callback(txd->callback_param);
+ }
+ }
+
+ /* Run any dependencies */
+ dma_run_dependencies(txd);
+
+ return ret;
+}
+
+/**
+ * fsldma_clean_running_descriptor - move the completed descriptor from
+ * ld_running to ld_completed
+ * @chan: Freescale DMA channel
+ * @desc: the descriptor which is completed
+ *
+ * Free the descriptor directly if acked by async_tx api, or move it to
+ * queue ld_completed.
+ */
+static void fsldma_clean_running_descriptor(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ /* Remove from the list of transactions */
+ list_del(&desc->node);
+
+ /*
+ * the client is allowed to attach dependent operations
+ * until 'ack' is set
+ */
+ if (!async_tx_test_ack(&desc->async_tx)) {
+ /*
+ * Move this descriptor to the list of descriptors which is
+ * completed, but still awaiting the 'ack' bit to be set.
+ */
+ list_add_tail(&desc->node, &chan->ld_completed);
+ return;
+ }
+
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+}
+
/**
* fsl_chan_xfer_ld_queue - transfer any pending transactions
* @chan : Freescale DMA channel
}
/**
- * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
+ * fsldma_cleanup_descriptors - cleanup link descriptors which are completed
+ * and move them to ld_completed to free until flag 'ack' is set
* @chan: Freescale DMA channel
- * @desc: descriptor to cleanup and free
*
- * This function is used on a descriptor which has been executed by the DMA
- * controller. It will run any callbacks, submit any dependencies, and then
- * free the descriptor.
+ * This function is used on descriptors which have been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies, then
+ * free these descriptors if flag 'ack' is set.
*/
-static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
+static void fsldma_cleanup_descriptors(struct fsldma_chan *chan)
{
- struct dma_async_tx_descriptor *txd = &desc->async_tx;
+ struct fsl_desc_sw *desc, *_desc;
+ dma_cookie_t cookie = 0;
+ dma_addr_t curr_phys = get_cdar(chan);
+ int seen_current = 0;
+
+ fsldma_clean_completed_descriptor(chan);
+
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
+ /*
+ * do not advance past the current descriptor loaded into the
+ * hardware channel, subsequent descriptors are either in
+ * process or have not been submitted
+ */
+ if (seen_current)
+ break;
+
+ /*
+ * stop the search if we reach the current descriptor and the
+ * channel is busy
+ */
+ if (desc->async_tx.phys == curr_phys) {
+ seen_current = 1;
+ if (!dma_is_idle(chan))
+ break;
+ }
+
+ cookie = fsldma_run_tx_complete_actions(chan, desc, cookie);
- /* Run the link descriptor callback function */
- if (txd->callback) {
- chan_dbg(chan, "LD %p callback\n", desc);
- txd->callback(txd->callback_param);
+ fsldma_clean_running_descriptor(chan, desc);
}
- /* Run any dependencies */
- dma_run_dependencies(txd);
+ /*
+ * Start any pending transactions automatically
+ *
+ * In the ideal case, we keep the DMA controller busy while we go
+ * ahead and free the descriptors below.
+ */
+ fsl_chan_xfer_ld_queue(chan);
- dma_descriptor_unmap(txd);
- chan_dbg(chan, "LD %p free\n", desc);
- dma_pool_free(chan->desc_pool, desc, txd->phys);
+ if (cookie > 0)
+ chan->common.completed_cookie = cookie;
}
/**
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
- unsigned long flags;
chan_dbg(chan, "free all channel resources\n");
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
+ fsldma_cleanup_descriptors(chan);
fsldma_free_desc_list(chan, &chan->ld_pending);
fsldma_free_desc_list(chan, &chan->ld_running);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ fsldma_free_desc_list(chan, &chan->ld_completed);
+ spin_unlock_bh(&chan->desc_lock);
dma_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
{
struct dma_slave_config *config;
struct fsldma_chan *chan;
- unsigned long flags;
int size;
if (!dchan)
switch (cmd) {
case DMA_TERMINATE_ALL:
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
/* Halt the DMA engine */
dma_halt(chan);
/* Remove and free all of the descriptors in the LD queue */
fsldma_free_desc_list(chan, &chan->ld_pending);
fsldma_free_desc_list(chan, &chan->ld_running);
+ fsldma_free_desc_list(chan, &chan->ld_completed);
chan->idle = true;
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
return 0;
case DMA_SLAVE_CONFIG:
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
- unsigned long flags;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
fsl_chan_xfer_ld_queue(chan);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
}
/**
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
+ enum dma_status ret;
+
+ ret = dma_cookie_status(dchan, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ spin_lock_bh(&chan->desc_lock);
+ fsldma_cleanup_descriptors(chan);
+ spin_unlock_bh(&chan->desc_lock);
+
return dma_cookie_status(dchan, cookie, txstate);
}
static void dma_do_tasklet(unsigned long data)
{
struct fsldma_chan *chan = (struct fsldma_chan *)data;
- struct fsl_desc_sw *desc, *_desc;
- LIST_HEAD(ld_cleanup);
- unsigned long flags;
chan_dbg(chan, "tasklet entry\n");
- spin_lock_irqsave(&chan->desc_lock, flags);
-
- /* update the cookie if we have some descriptors to cleanup */
- if (!list_empty(&chan->ld_running)) {
- dma_cookie_t cookie;
-
- desc = to_fsl_desc(chan->ld_running.prev);
- cookie = desc->async_tx.cookie;
- dma_cookie_complete(&desc->async_tx);
-
- chan_dbg(chan, "completed_cookie=%d\n", cookie);
- }
-
- /*
- * move the descriptors to a temporary list so we can drop the lock
- * during the entire cleanup operation
- */
- list_splice_tail_init(&chan->ld_running, &ld_cleanup);
+ spin_lock_bh(&chan->desc_lock);
/* the hardware is now idle and ready for more */
chan->idle = true;
- /*
- * Start any pending transactions automatically
- *
- * In the ideal case, we keep the DMA controller busy while we go
- * ahead and free the descriptors below.
- */
- fsl_chan_xfer_ld_queue(chan);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
-
- /* Run the callback for each descriptor, in order */
- list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
-
- /* Remove from the list of transactions */
- list_del(&desc->node);
+ /* Run all cleanup for descriptors which have been completed */
+ fsldma_cleanup_descriptors(chan);
- /* Run all cleanup for this descriptor */
- fsldma_cleanup_descriptor(chan, desc);
- }
+ spin_unlock_bh(&chan->desc_lock);
chan_dbg(chan, "tasklet exit\n");
}
spin_lock_init(&chan->desc_lock);
INIT_LIST_HEAD(&chan->ld_pending);
INIT_LIST_HEAD(&chan->ld_running);
+ INIT_LIST_HEAD(&chan->ld_completed);
chan->idle = true;
+#ifdef CONFIG_PM
+ chan->pm_state = RUNNING;
+#endif
chan->common.device = &fdev->common;
dma_cookie_init(&chan->common);
return 0;
}
+#ifdef CONFIG_PM
+static int fsldma_suspend_late(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fsldma_device *fdev = platform_get_drvdata(pdev);
+ struct fsldma_chan *chan;
+ int i;
+
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+
+ spin_lock_bh(&chan->desc_lock);
+ if (unlikely(!chan->idle))
+ goto out;
+ chan->regs_save.mr = get_mr(chan);
+ chan->pm_state = SUSPENDED;
+ spin_unlock_bh(&chan->desc_lock);
+ }
+ return 0;
+
+out:
+	/* only the failing channel's lock is still held at this point */
+	spin_unlock_bh(&chan->desc_lock);
+	for (; i >= 0; i--) {
+		chan = fdev->chan[i];
+		if (!chan)
+			continue;
+		chan->pm_state = RUNNING;
+	}
+ return -EBUSY;
+}
+
+static int fsldma_resume_early(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fsldma_device *fdev = platform_get_drvdata(pdev);
+ struct fsldma_chan *chan;
+ u32 mode;
+ int i;
+
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+
+ spin_lock_bh(&chan->desc_lock);
+ mode = chan->regs_save.mr
+ & ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA;
+ set_mr(chan, mode);
+ chan->pm_state = RUNNING;
+ spin_unlock_bh(&chan->desc_lock);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops fsldma_pm_ops = {
+ .suspend_late = fsldma_suspend_late,
+ .resume_early = fsldma_resume_early,
+};
+#endif
+
static const struct of_device_id fsldma_of_ids[] = {
{ .compatible = "fsl,elo3-dma", },
{ .compatible = "fsl,eloplus-dma", },
.name = "fsl-elo-dma",
.owner = THIS_MODULE,
.of_match_table = fsldma_of_ids,
+#ifdef CONFIG_PM
+ .pm = &fsldma_pm_ops,
+#endif
},
.probe = fsldma_of_probe,
.remove = fsldma_of_remove,
#define FSL_DMA_CHAN_PAUSE_EXT 0x00001000
#define FSL_DMA_CHAN_START_EXT 0x00002000
+#ifdef CONFIG_PM
+struct fsldma_chan_regs_save {
+ u32 mr;
+};
+
+enum fsldma_pm_state {
+ RUNNING = 0,
+ SUSPENDED,
+};
+#endif
+
struct fsldma_chan {
char name[8]; /* Channel name */
struct fsldma_chan_regs __iomem *regs;
spinlock_t desc_lock; /* Descriptor operation lock */
- struct list_head ld_pending; /* Link descriptors queue */
- struct list_head ld_running; /* Link descriptors queue */
+ /*
+ * Descriptors which are queued to run, but have not yet been
+ * submitted to the hardware for execution
+ */
+ struct list_head ld_pending;
+ /*
+ * Descriptors which are currently being executed by the hardware
+ */
+ struct list_head ld_running;
+ /*
+ * Descriptors which have finished execution by the hardware. These
+ * descriptors have already had their cleanup actions run. They are
+ * waiting for the ACK bit to be set by the async_tx API.
+ */
+	struct list_head ld_completed;
struct dma_chan common; /* DMA common channel */
struct dma_pool *desc_pool; /* Descriptors pool */
struct device *dev; /* Channel device */
struct tasklet_struct tasklet;
u32 feature;
bool idle; /* DMA controller is idle */
+#ifdef CONFIG_PM
+ struct fsldma_chan_regs_save regs_save;
+ enum fsldma_pm_state pm_state;
+#endif
void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
unsigned int chn_count;
unsigned int chn_real_count;
struct tasklet_struct tasklet;
+ struct imx_dma_data data;
};
#define IMX_DMA_SG_LOOP BIT(0)
emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
per_2_per = sdma->script_addrs->per_2_per_addr;
break;
+ case IMX_DMATYPE_ASRC_SP:
+ per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
+ per_2_per = sdma->script_addrs->per_2_per_addr;
+ break;
case IMX_DMATYPE_MSHC:
per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
int channel = sdmac->channel;
int ret = -EBUSY;
- sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
+ sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys,
+ GFP_KERNEL);
if (!sdmac->bd) {
ret = -ENOMEM;
goto out;
}
- memset(sdmac->bd, 0, PAGE_SIZE);
-
sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
struct imx_dma_data *data = fn_param;
if (!imx_dma_is_general_purpose(chan))
return false;
- chan->private = data;
+ sdmac->data = *data;
+ chan->private = &sdmac->data;
return true;
}
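The copy matters because filter parameters are routinely stack-allocated in
the requesting driver; a sketch of the pattern this guards against
(hypothetical client code, hypothetical request number):

	struct imx_dma_data data = { .dma_request = 1 };	/* on the stack */
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, sdma_filter_fn, &data);
	/* 'data' goes out of scope here; chan->private must not point at it */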
#ifdef DEBUG
if (chan->chan_id == IDMAC_IC_7) {
ic_sof = ipu_irq_map(69);
- if (ic_sof > 0)
- request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan);
+ if (ic_sof > 0) {
+ ret = request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan);
+ if (ret)
+				dev_err(&chan->dev->device,
+					"request irq failed for IC SOF\n");
+ }
ic_eof = ipu_irq_map(70);
- if (ic_eof > 0)
- request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan);
+ if (ic_eof > 0) {
+ ret = request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan);
+ if (ret)
+				dev_err(&chan->dev->device,
+					"request irq failed for IC EOF\n");
+ }
}
#endif
mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
dma_addr_t buf_addr, size_t len, size_t period_len,
enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct mmp_pdma_chan *chan;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
struct mmp_tdma_desc *desc;
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
+#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/random.h>
if (retval)
goto err_free2;
- return retval;
+ /* Register with OF helpers for DMA lookups (nonfatal) */
+ if (dev->of_node) {
+ retval = of_dma_controller_register(dev->of_node,
+ of_dma_xlate_by_chan_id, mdma);
+ if (retval)
+ dev_warn(dev, "Could not register for OF lookup\n");
+ }
+
+ return 0;
err_free2:
if (mdma->is_mpc8308)
struct device *dev = &op->dev;
struct mpc_dma *mdma = dev_get_drvdata(dev);
+ if (dev->of_node)
+ of_dma_controller_free(dev->of_node);
dma_async_device_unregister(&mdma->dma);
if (mdma->is_mpc8308) {
free_irq(mdma->irq2, mdma);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int ret;
- mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
- CCW_BLOCK_SIZE, &mxs_chan->ccw_phys,
- GFP_KERNEL);
+ mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev,
+ CCW_BLOCK_SIZE,
+ &mxs_chan->ccw_phys, GFP_KERNEL);
if (!mxs_chan->ccw) {
ret = -ENOMEM;
goto err_alloc;
}
- memset(mxs_chan->ccw, 0, CCW_BLOCK_SIZE);
-
if (mxs_chan->chan_irq != NO_IRQ) {
ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
0, "mxs-dma", mxs_dma);
static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
--- /dev/null
+/*
+ * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
+ * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/dma/nbpfaxi.h>
+
+#include "dmaengine.h"
+
+#define NBPF_REG_CHAN_OFFSET 0
+#define NBPF_REG_CHAN_SIZE 0x40
+
+/* Channel Current Transaction Byte register */
+#define NBPF_CHAN_CUR_TR_BYTE 0x20
+
+/* Channel Status register */
+#define NBPF_CHAN_STAT 0x24
+#define NBPF_CHAN_STAT_EN 1
+#define NBPF_CHAN_STAT_TACT 4
+#define NBPF_CHAN_STAT_ERR 0x10
+#define NBPF_CHAN_STAT_END 0x20
+#define NBPF_CHAN_STAT_TC 0x40
+#define NBPF_CHAN_STAT_DER 0x400
+
+/* Channel Control register */
+#define NBPF_CHAN_CTRL 0x28
+#define NBPF_CHAN_CTRL_SETEN 1
+#define NBPF_CHAN_CTRL_CLREN 2
+#define NBPF_CHAN_CTRL_STG 4
+#define NBPF_CHAN_CTRL_SWRST 8
+#define NBPF_CHAN_CTRL_CLRRQ 0x10
+#define NBPF_CHAN_CTRL_CLREND 0x20
+#define NBPF_CHAN_CTRL_CLRTC 0x40
+#define NBPF_CHAN_CTRL_SETSUS 0x100
+#define NBPF_CHAN_CTRL_CLRSUS 0x200
+
+/* Channel Configuration register */
+#define NBPF_CHAN_CFG 0x2c
+#define NBPF_CHAN_CFG_SEL 7 /* terminal SELect: 0..7 */
+#define NBPF_CHAN_CFG_REQD 8 /* REQuest Direction: DMAREQ is 0: input, 1: output */
+#define NBPF_CHAN_CFG_LOEN 0x10 /* LOw ENable: low DMA request line is: 0: inactive, 1: active */
+#define NBPF_CHAN_CFG_HIEN 0x20 /* HIgh ENable: high DMA request line is: 0: inactive, 1: active */
+#define NBPF_CHAN_CFG_LVL 0x40 /* LeVeL: DMA request line is sensed as 0: edge, 1: level */
+#define NBPF_CHAN_CFG_AM 0x700 /* ACK Mode: 0: Pulse mode, 1: Level mode, b'1x: Bus Cycle */
+#define NBPF_CHAN_CFG_SDS 0xf000 /* Source Data Size: 0: 8 bits, ..., 7: 1024 bits */
+#define NBPF_CHAN_CFG_DDS 0xf0000 /* Destination Data Size: as above */
+#define NBPF_CHAN_CFG_SAD 0x100000 /* Source ADdress counting: 0: increment, 1: fixed */
+#define NBPF_CHAN_CFG_DAD 0x200000 /* Destination ADdress counting: 0: increment, 1: fixed */
+#define NBPF_CHAN_CFG_TM 0x400000 /* Transfer Mode: 0: single, 1: block TM */
+#define NBPF_CHAN_CFG_DEM 0x1000000 /* DMAEND interrupt Mask */
+#define NBPF_CHAN_CFG_TCM 0x2000000 /* DMATCO interrupt Mask */
+#define NBPF_CHAN_CFG_SBE 0x8000000 /* Sweep Buffer Enable */
+#define NBPF_CHAN_CFG_RSEL 0x10000000 /* RM: Register Set sELect */
+#define NBPF_CHAN_CFG_RSW 0x20000000 /* RM: Register Select sWitch */
+#define NBPF_CHAN_CFG_REN 0x40000000 /* RM: Register Set Enable */
+#define NBPF_CHAN_CFG_DMS 0x80000000 /* 0: register mode (RM), 1: link mode (LM) */
+
+#define NBPF_CHAN_NXLA 0x38
+#define NBPF_CHAN_CRLA 0x3c
+
+/* Link Header field */
+#define NBPF_HEADER_LV 1
+#define NBPF_HEADER_LE 2
+#define NBPF_HEADER_WBD 4
+#define NBPF_HEADER_DIM 8
+
+#define NBPF_CTRL 0x300
+#define NBPF_CTRL_PR 1 /* 0: fixed priority, 1: round robin */
+#define NBPF_CTRL_LVINT 2 /* DMAEND and DMAERR signalling: 0: pulse, 1: level */
+
+#define NBPF_DSTAT_ER 0x314
+#define NBPF_DSTAT_END 0x318
+
+#define NBPF_DMA_BUSWIDTHS \
+ (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+struct nbpf_config {
+ int num_channels;
+ int buffer_size;
+};
+
+/*
+ * We've got 3 types of objects, used to describe DMA transfers:
+ * 1. high-level descriptor, containing a struct dma_async_tx_descriptor object
+ * in it, used to communicate with the user
+ * 2. hardware DMA link descriptors, that we pass to the DMAC for DMA transfer
+ * queuing; these must be DMAable, either mapped with the streaming DMA API or
+ * allocated from coherent memory - one per SG segment
+ * 3. descriptors, one per SG segment, used to manage the HW link descriptors from
+ * (2). They do not have to be DMAable. They can either be (a) allocated
+ * together with link descriptors as mixed (DMA / CPU) objects, or (b)
+ * separately. Even if allocated separately it would be best to link them
+ * to link descriptors once during channel resource allocation and always
+ * use them as a single object.
+ * Therefore for both cases (a) and (b) at run-time objects (2) and (3) shall be
+ * treated as a single SG segment descriptor.
+ */
+
+struct nbpf_link_reg {
+ u32 header;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 transaction_size;
+ u32 config;
+ u32 interval;
+ u32 extension;
+ u32 next;
+} __packed;
+
+struct nbpf_device;
+struct nbpf_channel;
+struct nbpf_desc;
+
+struct nbpf_link_desc {
+ struct nbpf_link_reg *hwdesc;
+ dma_addr_t hwdesc_dma_addr;
+ struct nbpf_desc *desc;
+ struct list_head node;
+};
+
+/**
+ * struct nbpf_desc - DMA transfer descriptor
+ * @async_tx: dmaengine object
+ * @user_wait: waiting for a user ack
+ * @length: total transfer length
+ * @chan: associated DMAC channel
+ * @sg: list of hardware descriptors, represented by struct nbpf_link_desc
+ * @node: member in channel descriptor lists
+ */
+struct nbpf_desc {
+ struct dma_async_tx_descriptor async_tx;
+ bool user_wait;
+ size_t length;
+ struct nbpf_channel *chan;
+ struct list_head sg;
+ struct list_head node;
+};
+
+/* Take a wild guess: allocate 4 segments per descriptor */
+#define NBPF_SEGMENTS_PER_DESC 4
+#define NBPF_DESCS_PER_PAGE ((PAGE_SIZE - sizeof(struct list_head)) / \
+ (sizeof(struct nbpf_desc) + \
+ NBPF_SEGMENTS_PER_DESC * \
+ (sizeof(struct nbpf_link_desc) + sizeof(struct nbpf_link_reg))))
+#define NBPF_SEGMENTS_PER_PAGE (NBPF_SEGMENTS_PER_DESC * NBPF_DESCS_PER_PAGE)
+
+struct nbpf_desc_page {
+ struct list_head node;
+ struct nbpf_desc desc[NBPF_DESCS_PER_PAGE];
+ struct nbpf_link_desc ldesc[NBPF_SEGMENTS_PER_PAGE];
+ struct nbpf_link_reg hwdesc[NBPF_SEGMENTS_PER_PAGE];
+};
+
+/**
+ * struct nbpf_channel - one DMAC channel
+ * @dma_chan: standard dmaengine channel object
+ * @tasklet: bottom half to finish transaction work
+ * @base: register address base
+ * @nbpf: DMAC
+ * @name: IRQ name
+ * @irq: IRQ number
+ * @slave_src_addr: source address for slave DMA
+ * @slave_src_width: source slave data size in bytes
+ * @slave_src_burst: maximum source slave burst size in bytes
+ * @slave_dst_addr: destination address for slave DMA
+ * @slave_dst_width: destination slave data size in bytes
+ * @slave_dst_burst: maximum destination slave burst size in bytes
+ * @terminal: DMA terminal, assigned to this channel
+ * @dmarq_cfg: DMA request line configuration - high / low, edge / level for NBPF_CHAN_CFG
+ * @flags: configuration flags from DT
+ * @lock: protect descriptor lists
+ * @free_links: list of free link descriptors
+ * @free: list of free descriptors
+ * @queued: list of queued descriptors
+ * @active: list of descriptors, scheduled for processing
+ * @done: list of completed descriptors, waiting post-processing
+ * @desc_page: list of additionally allocated descriptor pages - if any
+ * @running: linked descriptor of the running transaction
+ * @paused: are transfers on this channel currently paused?
+ */
+struct nbpf_channel {
+ struct dma_chan dma_chan;
+ struct tasklet_struct tasklet;
+ void __iomem *base;
+ struct nbpf_device *nbpf;
+ char name[16];
+ int irq;
+ dma_addr_t slave_src_addr;
+ size_t slave_src_width;
+ size_t slave_src_burst;
+ dma_addr_t slave_dst_addr;
+ size_t slave_dst_width;
+ size_t slave_dst_burst;
+ unsigned int terminal;
+ u32 dmarq_cfg;
+ unsigned long flags;
+ spinlock_t lock;
+ struct list_head free_links;
+ struct list_head free;
+ struct list_head queued;
+ struct list_head active;
+ struct list_head done;
+ struct list_head desc_page;
+ struct nbpf_desc *running;
+ bool paused;
+};
+
+struct nbpf_device {
+ struct dma_device dma_dev;
+ void __iomem *base;
+ struct clk *clk;
+ const struct nbpf_config *config;
+ struct nbpf_channel chan[];
+};
+
+enum nbpf_model {
+ NBPF1B4,
+ NBPF1B8,
+ NBPF1B16,
+ NBPF4B4,
+ NBPF4B8,
+ NBPF4B16,
+ NBPF8B4,
+ NBPF8B8,
+ NBPF8B16,
+};
+
+static struct nbpf_config nbpf_cfg[] = {
+ [NBPF1B4] = {
+ .num_channels = 1,
+ .buffer_size = 4,
+ },
+ [NBPF1B8] = {
+ .num_channels = 1,
+ .buffer_size = 8,
+ },
+ [NBPF1B16] = {
+ .num_channels = 1,
+ .buffer_size = 16,
+ },
+ [NBPF4B4] = {
+ .num_channels = 4,
+ .buffer_size = 4,
+ },
+ [NBPF4B8] = {
+ .num_channels = 4,
+ .buffer_size = 8,
+ },
+ [NBPF4B16] = {
+ .num_channels = 4,
+ .buffer_size = 16,
+ },
+ [NBPF8B4] = {
+ .num_channels = 8,
+ .buffer_size = 4,
+ },
+ [NBPF8B8] = {
+ .num_channels = 8,
+ .buffer_size = 8,
+ },
+ [NBPF8B16] = {
+ .num_channels = 8,
+ .buffer_size = 16,
+ },
+};
+
+#define nbpf_to_chan(d) container_of(d, struct nbpf_channel, dma_chan)
+
+/*
+ * dmaengine drivers seem to have a lot in common and instead of sharing more
+ * code, they reimplement those common algorithms independently. In this driver
+ * we try to separate the hardware-specific part from the (largely) generic
+ * part. This improves code readability and makes it possible in the future to
+ * reuse the generic code in the form of a helper library. That generic code should
+ * be suitable for various DMA controllers, using transfer descriptors in RAM
+ * and pushing one SG list at a time to the DMA controller.
+ */
+
+/* Hardware-specific part */
+
+static inline u32 nbpf_chan_read(struct nbpf_channel *chan,
+ unsigned int offset)
+{
+ u32 data = ioread32(chan->base + offset);
+ dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
+ __func__, chan->base, offset, data);
+ return data;
+}
+
+static inline void nbpf_chan_write(struct nbpf_channel *chan,
+ unsigned int offset, u32 data)
+{
+ iowrite32(data, chan->base + offset);
+ dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
+ __func__, chan->base, offset, data);
+}
+
+static inline u32 nbpf_read(struct nbpf_device *nbpf,
+ unsigned int offset)
+{
+ u32 data = ioread32(nbpf->base + offset);
+ dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
+ __func__, nbpf->base, offset, data);
+ return data;
+}
+
+static inline void nbpf_write(struct nbpf_device *nbpf,
+ unsigned int offset, u32 data)
+{
+ iowrite32(data, nbpf->base + offset);
+ dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
+ __func__, nbpf->base, offset, data);
+}
+
+static void nbpf_chan_halt(struct nbpf_channel *chan)
+{
+ nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
+}
+
+static bool nbpf_status_get(struct nbpf_channel *chan)
+{
+ u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END);
+
+ return status & BIT(chan - chan->nbpf->chan);
+}
+
+static void nbpf_status_ack(struct nbpf_channel *chan)
+{
+ nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND);
+}
+
+static u32 nbpf_error_get(struct nbpf_device *nbpf)
+{
+ return nbpf_read(nbpf, NBPF_DSTAT_ER);
+}
+
+static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error)
+{
+ return nbpf->chan + __ffs(error);
+}
+
+static void nbpf_error_clear(struct nbpf_channel *chan)
+{
+ u32 status;
+ int i;
+
+ /* Stop the channel, make sure DMA has been aborted */
+ nbpf_chan_halt(chan);
+
+ for (i = 1000; i; i--) {
+ status = nbpf_chan_read(chan, NBPF_CHAN_STAT);
+ if (!(status & NBPF_CHAN_STAT_TACT))
+ break;
+ cpu_relax();
+ }
+
+ if (!i)
+ dev_err(chan->dma_chan.device->dev,
+ "%s(): abort timeout, channel status 0x%x\n", __func__, status);
+
+ nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST);
+}
+
+static int nbpf_start(struct nbpf_desc *desc)
+{
+ struct nbpf_channel *chan = desc->chan;
+ struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg, struct nbpf_link_desc, node);
+
+ nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr);
+ nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS);
+ chan->paused = false;
+
+ /* Software trigger MEMCPY - only MEMCPY uses the block mode */
+ if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TM)
+ nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG);
+
+ dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__,
+ nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA));
+
+ return 0;
+}
+
+static void nbpf_chan_prepare(struct nbpf_channel *chan)
+{
+ chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) |
+ (chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) |
+ (chan->flags & NBPF_SLAVE_RQ_LEVEL ?
+ NBPF_CHAN_CFG_LVL | (NBPF_CHAN_CFG_AM & 0x200) : 0) |
+ chan->terminal;
+}
+
+static void nbpf_chan_prepare_default(struct nbpf_channel *chan)
+{
+ /* Don't output DMAACK */
+ chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400;
+ chan->terminal = 0;
+ chan->flags = 0;
+}
+
+static void nbpf_chan_configure(struct nbpf_channel *chan)
+{
+ /*
+ * We assume that only the link mode and DMA request line configuration
+ * have to be set in the configuration register manually. Dynamic
+ * per-transfer configuration will be loaded from transfer descriptors.
+ */
+ nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
+}
+
+static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size)
+{
+ /* Maximum supported bursts depend on the buffer size */
+ return min_t(int, __ffs(size), ilog2(nbpf->config->buffer_size * 8));
+}
+
+static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
+ enum dma_slave_buswidth width, u32 burst)
+{
+ size_t size;
+
+ if (!burst)
+ burst = 1;
+
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ size = 8 * burst;
+ break;
+
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ size = 4 * burst;
+ break;
+
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ size = 2 * burst;
+ break;
+
+ default:
+ pr_warn("%s(): invalid bus width %u\n", __func__, width);
+ /* fall through */
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ size = burst;
+ }
+
+ return nbpf_xfer_ds(nbpf, size);
+}
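
A worked example of the encoding this returns, assuming buffer_size = 16:

	/*
	 * nbpf_xfer_size(nbpf, DMA_SLAVE_BUSWIDTH_4_BYTES, 16):
	 *   size = 4 * 16 = 64 bytes
	 *   __ffs(64) = 6, ilog2(16 * 8) = 7
	 *   min(6, 7) = 6 - the NBPF_CHAN_CFG_SDS/DDS encoding for 2^6 = 64 bytes
	 */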
+
+/*
+ * We need a way to recognise slaves whose data is sent "raw" over the bus,
+ * i.e. it isn't known in advance how many bytes will be received. The slave
+ * driver then has to provide a "large enough" buffer and either read it when
+ * it is full, or detect that some data has arrived, wait for a timeout and,
+ * if no more data arrives, receive what's already there. We want to handle
+ * such slaves in a special way to allow an optimised mode for other users,
+ * for whom the amount of data is known in advance. So far there's no way to
+ * recognise such slaves. We use a data-width check to distinguish between
+ * the SD host and the PL011 UART.
+ */
+
+static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
+ enum dma_transfer_direction direction,
+ dma_addr_t src, dma_addr_t dst, size_t size, bool last)
+{
+ struct nbpf_link_reg *hwdesc = ldesc->hwdesc;
+ struct nbpf_desc *desc = ldesc->desc;
+ struct nbpf_channel *chan = desc->chan;
+ struct device *dev = chan->dma_chan.device->dev;
+ size_t mem_xfer, slave_xfer;
+ bool can_burst;
+
+ hwdesc->header = NBPF_HEADER_WBD | NBPF_HEADER_LV |
+ (last ? NBPF_HEADER_LE : 0);
+
+ hwdesc->src_addr = src;
+ hwdesc->dst_addr = dst;
+ hwdesc->transaction_size = size;
+
+ /*
+ * set config: SAD, DAD, DDS, SDS, etc.
+ * Note on transfer sizes: the DMAC can perform unaligned DMA transfers,
+ * but it is important to have transaction size a multiple of both
+ * receiver and transmitter transfer sizes. It is also possible to use
+ * different RAM and device transfer sizes, and it does work well with
+ * some devices, e.g. with V08R07S01E SD host controllers, which can use
+ * 128 byte transfers. But this doesn't work with other devices,
+ * especially when the transaction size is unknown. This is the case,
+ * e.g. with serial drivers like amba-pl011.c. For reception it sets up
+ * a transaction size of 4K, and if fewer bytes are received it
+ * pauses DMA and reads out the data received via DMA as well as what is left
+ * in the Rx FIFO. For this to work with the RAM side using burst
+ * transfers we enable the SBE bit and terminate the transfer in our
+ * DMA_PAUSE handler.
+ */
+ mem_xfer = nbpf_xfer_ds(chan->nbpf, size);
+
+ switch (direction) {
+ case DMA_DEV_TO_MEM:
+ can_burst = chan->slave_src_width >= 3;
+ slave_xfer = min(mem_xfer, can_burst ?
+ chan->slave_src_burst : chan->slave_src_width);
+ /*
+ * Is the slave narrower than 64 bits, i.e. isn't using the full
+ * bus width and cannot use bursts?
+ */
+ if (mem_xfer > chan->slave_src_burst && !can_burst)
+ mem_xfer = chan->slave_src_burst;
+ /* Device-to-RAM DMA is unreliable without REQD set */
+ hwdesc->config = NBPF_CHAN_CFG_SAD | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)) |
+ (NBPF_CHAN_CFG_SDS & (slave_xfer << 12)) | NBPF_CHAN_CFG_REQD |
+ NBPF_CHAN_CFG_SBE;
+ break;
+
+ case DMA_MEM_TO_DEV:
+ slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ?
+ chan->slave_dst_burst : chan->slave_dst_width);
+ hwdesc->config = NBPF_CHAN_CFG_DAD | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
+ (NBPF_CHAN_CFG_DDS & (slave_xfer << 16)) | NBPF_CHAN_CFG_REQD;
+ break;
+
+ case DMA_MEM_TO_MEM:
+ hwdesc->config = NBPF_CHAN_CFG_TCM | NBPF_CHAN_CFG_TM |
+ (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
+ (NBPF_CHAN_CFG_DDS & (mem_xfer << 16));
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ hwdesc->config |= chan->dmarq_cfg | (last ? 0 : NBPF_CHAN_CFG_DEM) |
+ NBPF_CHAN_CFG_DMS;
+
+ dev_dbg(dev, "%s(): desc @ %pad: hdr 0x%x, cfg 0x%x, %zu @ %pad -> %pad\n",
+ __func__, &ldesc->hwdesc_dma_addr, hwdesc->header,
+ hwdesc->config, size, &src, &dst);
+
+ dma_sync_single_for_device(dev, ldesc->hwdesc_dma_addr, sizeof(*hwdesc),
+ DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static size_t nbpf_bytes_left(struct nbpf_channel *chan)
+{
+ return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE);
+}
+
+static void nbpf_configure(struct nbpf_device *nbpf)
+{
+ nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT);
+}
+
+static void nbpf_pause(struct nbpf_channel *chan)
+{
+ nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
+ /* See comment in nbpf_prep_one() */
+ nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
+}
+
+/* Generic part */
+
+/* DMA ENGINE functions */
+static void nbpf_issue_pending(struct dma_chan *dchan)
+{
+ struct nbpf_channel *chan = nbpf_to_chan(dchan);
+ unsigned long flags;
+
+ dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (list_empty(&chan->queued))
+ goto unlock;
+
+ list_splice_tail_init(&chan->queued, &chan->active);
+
+ if (!chan->running) {
+ struct nbpf_desc *desc = list_first_entry(&chan->active,
+ struct nbpf_desc, node);
+ if (!nbpf_start(desc))
+ chan->running = desc;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static enum dma_status nbpf_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ struct nbpf_channel *chan = nbpf_to_chan(dchan);
+ enum dma_status status = dma_cookie_status(dchan, cookie, state);
+
+ if (state) {
+ dma_cookie_t running;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ running = chan->running ? chan->running->async_tx.cookie : -EINVAL;
+
+ if (cookie == running) {
+ state->residue = nbpf_bytes_left(chan);
+ dev_dbg(dchan->device->dev, "%s(): residue %u\n", __func__,
+ state->residue);
+ } else if (status == DMA_IN_PROGRESS) {
+ struct nbpf_desc *desc;
+ bool found = false;
+
+ list_for_each_entry(desc, &chan->active, node)
+ if (desc->async_tx.cookie == cookie) {
+ found = true;
+ break;
+ }
+
+ if (!found)
+ list_for_each_entry(desc, &chan->queued, node)
+ if (desc->async_tx.cookie == cookie) {
+ found = true;
+ break;
+ }
+
+ state->residue = found ? desc->length : 0;
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+
+ if (chan->paused)
+ status = DMA_PAUSED;
+
+ return status;
+}
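
For reference, a sketch of how a client reads back the residue reported here,
assuming 'chan' and 'cookie' from an earlier submission:

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS)
		pr_debug("%u bytes left\n", state.residue);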
+
+static dma_cookie_t nbpf_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx);
+ struct nbpf_channel *chan = desc->chan;
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ cookie = dma_cookie_assign(tx);
+ list_add_tail(&desc->node, &chan->queued);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie);
+
+ return cookie;
+}
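
A sketch of the client-side path that ends up here - illustration only,
assuming an already mapped scatterlist 'sgl' with 'nents' entries:

	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;
	cookie = dmaengine_submit(desc);	/* calls nbpf_tx_submit() */
	dma_async_issue_pending(chan);		/* calls nbpf_issue_pending() */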
+
+static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
+{
+ struct dma_chan *dchan = &chan->dma_chan;
+ struct nbpf_desc_page *dpage = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ struct nbpf_link_desc *ldesc;
+ struct nbpf_link_reg *hwdesc;
+ struct nbpf_desc *desc;
+ LIST_HEAD(head);
+ LIST_HEAD(lhead);
+ int i;
+ struct device *dev = dchan->device->dev;
+
+ if (!dpage)
+ return -ENOMEM;
+
+ dev_dbg(dev, "%s(): alloc %lu descriptors, %lu segments, total alloc %zu\n",
+ __func__, NBPF_DESCS_PER_PAGE, NBPF_SEGMENTS_PER_PAGE, sizeof(*dpage));
+
+ for (i = 0, ldesc = dpage->ldesc, hwdesc = dpage->hwdesc;
+ i < ARRAY_SIZE(dpage->ldesc);
+ i++, ldesc++, hwdesc++) {
+ ldesc->hwdesc = hwdesc;
+ list_add_tail(&ldesc->node, &lhead);
+ ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
+ hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);
+
+ dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
+ hwdesc, &ldesc->hwdesc_dma_addr);
+ }
+
+ for (i = 0, desc = dpage->desc;
+ i < ARRAY_SIZE(dpage->desc);
+ i++, desc++) {
+ dma_async_tx_descriptor_init(&desc->async_tx, dchan);
+ desc->async_tx.tx_submit = nbpf_tx_submit;
+ desc->chan = chan;
+ INIT_LIST_HEAD(&desc->sg);
+ list_add_tail(&desc->node, &head);
+ }
+
+ /*
+ * This function cannot be called from interrupt context, so there is no
+ * need to save flags
+ */
+ spin_lock_irq(&chan->lock);
+ list_splice_tail(&lhead, &chan->free_links);
+ list_splice_tail(&head, &chan->free);
+ list_add(&dpage->node, &chan->desc_page);
+ spin_unlock_irq(&chan->lock);
+
+ return ARRAY_SIZE(dpage->desc);
+}
+
+static void nbpf_desc_put(struct nbpf_desc *desc)
+{
+ struct nbpf_channel *chan = desc->chan;
+ struct nbpf_link_desc *ldesc, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ list_for_each_entry_safe(ldesc, tmp, &desc->sg, node)
+ list_move(&ldesc->node, &chan->free_links);
+
+ list_add(&desc->node, &chan->free);
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void nbpf_scan_acked(struct nbpf_channel *chan)
+{
+ struct nbpf_desc *desc, *tmp;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->lock, flags);
+ list_for_each_entry_safe(desc, tmp, &chan->done, node)
+ if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) {
+ list_move(&desc->node, &head);
+ desc->user_wait = false;
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ list_for_each_entry_safe(desc, tmp, &head, node) {
+ list_del(&desc->node);
+ nbpf_desc_put(desc);
+ }
+}
+
+/*
+ * We have to allocate descriptors with the channel lock dropped. This means
+ * that, before we re-acquire the lock, descriptors can already have been
+ * taken, so we have to re-check after re-acquiring the lock and possibly
+ * retry, if they are gone again.
+ */
+static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len)
+{
+ struct nbpf_desc *desc = NULL;
+ struct nbpf_link_desc *ldesc, *prev = NULL;
+
+ nbpf_scan_acked(chan);
+
+ spin_lock_irq(&chan->lock);
+
+ do {
+ int i = 0, ret;
+
+ if (list_empty(&chan->free)) {
+ /* No more free descriptors */
+ spin_unlock_irq(&chan->lock);
+ ret = nbpf_desc_page_alloc(chan);
+ if (ret < 0)
+ return NULL;
+ spin_lock_irq(&chan->lock);
+ continue;
+ }
+ desc = list_first_entry(&chan->free, struct nbpf_desc, node);
+ list_del(&desc->node);
+
+ do {
+ if (list_empty(&chan->free_links)) {
+ /* No more free link descriptors */
+ spin_unlock_irq(&chan->lock);
+ ret = nbpf_desc_page_alloc(chan);
+ if (ret < 0) {
+ nbpf_desc_put(desc);
+ return NULL;
+ }
+ spin_lock_irq(&chan->lock);
+ continue;
+ }
+
+ ldesc = list_first_entry(&chan->free_links,
+ struct nbpf_link_desc, node);
+ ldesc->desc = desc;
+ if (prev)
+ prev->hwdesc->next = (u32)ldesc->hwdesc_dma_addr;
+
+ prev = ldesc;
+ list_move_tail(&ldesc->node, &desc->sg);
+
+ i++;
+ } while (i < len);
+ } while (!desc);
+
+ prev->hwdesc->next = 0;
+
+ spin_unlock_irq(&chan->lock);
+
+ return desc;
+}
+
+static void nbpf_chan_idle(struct nbpf_channel *chan)
+{
+ struct nbpf_desc *desc, *tmp;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ list_splice_init(&chan->done, &head);
+ list_splice_init(&chan->active, &head);
+ list_splice_init(&chan->queued, &head);
+
+ chan->running = NULL;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ list_for_each_entry_safe(desc, tmp, &head, node) {
+ dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n",
+ __func__, desc, desc->async_tx.cookie);
+ list_del(&desc->node);
+ nbpf_desc_put(desc);
+ }
+}
+
+static int nbpf_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct nbpf_channel *chan = nbpf_to_chan(dchan);
+ struct dma_slave_config *config;
+
+ dev_dbg(dchan->device->dev, "Entry %s(%d)\n", __func__, cmd);
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ dev_dbg(dchan->device->dev, "Terminating\n");
+ nbpf_chan_halt(chan);
+ nbpf_chan_idle(chan);
+ break;
+
+ case DMA_SLAVE_CONFIG:
+ if (!arg)
+ return -EINVAL;
+ config = (struct dma_slave_config *)arg;
+
+ /*
+ * We could check config->slave_id to match chan->terminal here,
+ * but with DT they would be coming from the same source, so
+ * such a check would be superfluous
+ */
+
+ chan->slave_dst_addr = config->dst_addr;
+ chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
+ config->dst_addr_width, 1);
+ chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
+ config->dst_addr_width,
+ config->dst_maxburst);
+ chan->slave_src_addr = config->src_addr;
+ chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
+ config->src_addr_width, 1);
+ chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
+ config->src_addr_width,
+ config->src_maxburst);
+ break;
+
+ case DMA_PAUSE:
+ chan->paused = true;
+ nbpf_pause(chan);
+ break;
+
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
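
For reference, the client-side call that reaches the DMA_SLAVE_CONFIG case
above - a sketch, where 'fifo_phys' is a hypothetical device FIFO bus address:

	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_phys,	/* hypothetical FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
	};
	int ret = dmaengine_slave_config(chan, &cfg);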
+
+static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan,
+ struct scatterlist *src_sg, struct scatterlist *dst_sg,
+ size_t len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct nbpf_link_desc *ldesc;
+ struct scatterlist *mem_sg;
+ struct nbpf_desc *desc;
+ bool inc_src, inc_dst;
+ size_t data_len = 0;
+ int i = 0;
+
+ switch (direction) {
+ case DMA_DEV_TO_MEM:
+ mem_sg = dst_sg;
+ inc_src = false;
+ inc_dst = true;
+ break;
+
+ case DMA_MEM_TO_DEV:
+ mem_sg = src_sg;
+ inc_src = true;
+ inc_dst = false;
+ break;
+
+ default:
+ case DMA_MEM_TO_MEM:
+ mem_sg = src_sg;
+ inc_src = true;
+ inc_dst = true;
+ }
+
+ desc = nbpf_desc_get(chan, len);
+ if (!desc)
+ return NULL;
+
+ desc->async_tx.flags = flags;
+ desc->async_tx.cookie = -EBUSY;
+ desc->user_wait = false;
+
+ /*
+ * This is a private descriptor list, and we own the descriptor. No need
+ * to lock.
+ */
+ list_for_each_entry(ldesc, &desc->sg, node) {
+ int ret = nbpf_prep_one(ldesc, direction,
+ sg_dma_address(src_sg),
+ sg_dma_address(dst_sg),
+ sg_dma_len(mem_sg),
+ i == len - 1);
+ if (ret < 0) {
+ nbpf_desc_put(desc);
+ return NULL;
+ }
+ data_len += sg_dma_len(mem_sg);
+ if (inc_src)
+ src_sg = sg_next(src_sg);
+ if (inc_dst)
+ dst_sg = sg_next(dst_sg);
+ mem_sg = direction == DMA_DEV_TO_MEM ? dst_sg : src_sg;
+ i++;
+ }
+
+ desc->length = data_len;
+
+ /* The user has to return the descriptor to us ASAP via .tx_submit() */
+ return &desc->async_tx;
+}
+
+static struct dma_async_tx_descriptor *nbpf_prep_memcpy(
+ struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct nbpf_channel *chan = nbpf_to_chan(dchan);
+ struct scatterlist dst_sg;
+ struct scatterlist src_sg;
+
+ sg_init_table(&dst_sg, 1);
+ sg_init_table(&src_sg, 1);
+
+ sg_dma_address(&dst_sg) = dst;
+ sg_dma_address(&src_sg) = src;
+
+ sg_dma_len(&dst_sg) = len;
+ sg_dma_len(&src_sg) = len;
+
+ dev_dbg(dchan->device->dev, "%s(): %zu @ %pad -> %pad\n",
+ __func__, len, &src, &dst);
+
+ return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1,
+ DMA_MEM_TO_MEM, flags);
+}
+
+static struct dma_async_tx_descriptor *nbpf_prep_memcpy_sg(
+ struct dma_chan *dchan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
+{
+ struct nbpf_channel *chan = nbpf_to_chan(dchan);
+
+ if (dst_nents != src_nents)
+ return NULL;
+
+ return nbpf_prep_sg(chan, src_sg, dst_sg, src_nents,
+ DMA_MEM_TO_MEM, flags);
+}
+
+static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
+ struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags, void *context)
+{
+ struct nbpf_channel *chan = nbpf_to_chan(dchan);
+ struct scatterlist slave_sg;
+
+ dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);
+
+ sg_init_table(&slave_sg, 1);
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ sg_dma_address(&slave_sg) = chan->slave_dst_addr;
+ return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len,
+ direction, flags);
+
+ case DMA_DEV_TO_MEM:
+ sg_dma_address(&slave_sg) = chan->slave_src_addr;
+ return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len,
+ direction, flags);
+
+ default:
+ return NULL;
+ }
+}
+
+static int nbpf_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct nbpf_channel *chan = nbpf_to_chan(dchan);
+ int ret;
+
+ INIT_LIST_HEAD(&chan->free);
+ INIT_LIST_HEAD(&chan->free_links);
+ INIT_LIST_HEAD(&chan->queued);
+ INIT_LIST_HEAD(&chan->active);
+ INIT_LIST_HEAD(&chan->done);
+
+ ret = nbpf_desc_page_alloc(chan);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dchan->device->dev, "Entry %s(): terminal %u\n", __func__,
+ chan->terminal);
+
+ nbpf_chan_configure(chan);
+
+ return ret;
+}
+
+static void nbpf_free_chan_resources(struct dma_chan *dchan)
+{
+ struct nbpf_channel *chan = nbpf_to_chan(dchan);
+ struct nbpf_desc_page *dpage, *tmp;
+
+ dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);
+
+ nbpf_chan_halt(chan);
+ nbpf_chan_idle(chan);
+ /* Clean up in case a channel is re-used for MEMCPY after slave DMA */
+ nbpf_chan_prepare_default(chan);
+
+ list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) {
+ struct nbpf_link_desc *ldesc;
+ int i;
+ list_del(&dpage->node);
+ for (i = 0, ldesc = dpage->ldesc;
+ i < ARRAY_SIZE(dpage->ldesc);
+ i++, ldesc++)
+ dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
+ sizeof(*ldesc->hwdesc), DMA_TO_DEVICE);
+ free_page((unsigned long)dpage);
+ }
+}
+
+static int nbpf_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = NBPF_DMA_BUSWIDTHS;
+ caps->dstn_addr_widths = NBPF_DMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = false;
+ caps->cmd_terminate = true;
+
+ return 0;
+}
+
+static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct nbpf_device *nbpf = ofdma->of_dma_data;
+ struct dma_chan *dchan;
+ struct nbpf_channel *chan;
+
+ if (dma_spec->args_count != 2)
+ return NULL;
+
+ dchan = dma_get_any_slave_channel(&nbpf->dma_dev);
+ if (!dchan)
+ return NULL;
+
+ dev_dbg(dchan->device->dev, "Entry %s(%s)\n", __func__,
+ dma_spec->np->name);
+
+ chan = nbpf_to_chan(dchan);
+
+ chan->terminal = dma_spec->args[0];
+ chan->flags = dma_spec->args[1];
+
+ nbpf_chan_prepare(chan);
+ nbpf_chan_configure(chan);
+
+ return dchan;
+}
+
+static void nbpf_chan_tasklet(unsigned long data)
+{
+ struct nbpf_channel *chan = (struct nbpf_channel *)data;
+ struct nbpf_desc *desc, *tmp;
+ dma_async_tx_callback callback;
+ void *param;
+
+ while (!list_empty(&chan->done)) {
+ bool found = false, must_put, recycling = false;
+
+ spin_lock_irq(&chan->lock);
+
+ list_for_each_entry_safe(desc, tmp, &chan->done, node) {
+ if (!desc->user_wait) {
+ /* Newly completed descriptor, have to process */
+ found = true;
+ break;
+ } else if (async_tx_test_ack(&desc->async_tx)) {
+ /*
+ * This descriptor was waiting for a user ACK,
+ * it can be recycled now.
+ */
+ list_del(&desc->node);
+ spin_unlock_irq(&chan->lock);
+ nbpf_desc_put(desc);
+ recycling = true;
+ break;
+ }
+ }
+
+ if (recycling)
+ continue;
+
+ if (!found) {
+ /* This can happen if TERMINATE_ALL has been called */
+ spin_unlock_irq(&chan->lock);
+ break;
+ }
+
+ dma_cookie_complete(&desc->async_tx);
+
+ /*
+ * With the lock released we cannot dereference desc, as it may
+ * still be on the "done" list
+ */
+ if (async_tx_test_ack(&desc->async_tx)) {
+ list_del(&desc->node);
+ must_put = true;
+ } else {
+ desc->user_wait = true;
+ must_put = false;
+ }
+
+ callback = desc->async_tx.callback;
+ param = desc->async_tx.callback_param;
+
+ /* ack and callback completed descriptor */
+ spin_unlock_irq(&chan->lock);
+
+ if (callback)
+ callback(param);
+
+ if (must_put)
+ nbpf_desc_put(desc);
+ }
+}
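
Completed descriptors are only recycled once the client has acknowledged them;
a client that did not pass DMA_CTRL_ACK at prep time does so explicitly
(sketch):

	/* in the client, once the transfer result has been consumed: */
	async_tx_ack(desc);	/* lets the tasklet above recycle the descriptor */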
+
+static irqreturn_t nbpf_chan_irq(int irq, void *dev)
+{
+ struct nbpf_channel *chan = dev;
+ bool done = nbpf_status_get(chan);
+ struct nbpf_desc *desc;
+ irqreturn_t ret;
+ bool bh = false;
+
+ if (!done)
+ return IRQ_NONE;
+
+ nbpf_status_ack(chan);
+
+ dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__);
+
+ spin_lock(&chan->lock);
+ desc = chan->running;
+ if (WARN_ON(!desc)) {
+ ret = IRQ_NONE;
+ goto unlock;
+ } else {
+ ret = IRQ_HANDLED;
+ bh = true;
+ }
+
+ list_move_tail(&desc->node, &chan->done);
+ chan->running = NULL;
+
+ if (!list_empty(&chan->active)) {
+ desc = list_first_entry(&chan->active,
+ struct nbpf_desc, node);
+ if (!nbpf_start(desc))
+ chan->running = desc;
+ }
+
+unlock:
+ spin_unlock(&chan->lock);
+
+ if (bh)
+ tasklet_schedule(&chan->tasklet);
+
+ return ret;
+}
+
+static irqreturn_t nbpf_err_irq(int irq, void *dev)
+{
+ struct nbpf_device *nbpf = dev;
+ u32 error = nbpf_error_get(nbpf);
+
+ if (!error)
+ return IRQ_NONE;
+
+ dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq);
+
+ do {
+ struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error);
+ /* On error: abort all queued transfers, no callback */
+ nbpf_error_clear(chan);
+ nbpf_chan_idle(chan);
+ error = nbpf_error_get(nbpf);
+ } while (error);
+
+ return IRQ_HANDLED;
+}
+
+static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
+{
+ struct dma_device *dma_dev = &nbpf->dma_dev;
+ struct nbpf_channel *chan = nbpf->chan + n;
+ int ret;
+
+ chan->nbpf = nbpf;
+ chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n;
+ INIT_LIST_HEAD(&chan->desc_page);
+ spin_lock_init(&chan->lock);
+ chan->dma_chan.device = dma_dev;
+ dma_cookie_init(&chan->dma_chan);
+ nbpf_chan_prepare_default(chan);
+
+ dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base);
+
+ snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);
+
+ tasklet_init(&chan->tasklet, nbpf_chan_tasklet, (unsigned long)chan);
+ ret = devm_request_irq(dma_dev->dev, chan->irq,
+ nbpf_chan_irq, IRQF_SHARED,
+ chan->name, chan);
+ if (ret < 0)
+ return ret;
+
+ /* Add the channel to DMA device channel list */
+ list_add_tail(&chan->dma_chan.device_node,
+ &dma_dev->channels);
+
+ return 0;
+}
+
+static const struct of_device_id nbpf_match[] = {
+ {.compatible = "renesas,nbpfaxi64dmac1b4", .data = &nbpf_cfg[NBPF1B4]},
+ {.compatible = "renesas,nbpfaxi64dmac1b8", .data = &nbpf_cfg[NBPF1B8]},
+ {.compatible = "renesas,nbpfaxi64dmac1b16", .data = &nbpf_cfg[NBPF1B16]},
+ {.compatible = "renesas,nbpfaxi64dmac4b4", .data = &nbpf_cfg[NBPF4B4]},
+ {.compatible = "renesas,nbpfaxi64dmac4b8", .data = &nbpf_cfg[NBPF4B8]},
+ {.compatible = "renesas,nbpfaxi64dmac4b16", .data = &nbpf_cfg[NBPF4B16]},
+ {.compatible = "renesas,nbpfaxi64dmac8b4", .data = &nbpf_cfg[NBPF8B4]},
+ {.compatible = "renesas,nbpfaxi64dmac8b8", .data = &nbpf_cfg[NBPF8B8]},
+ {.compatible = "renesas,nbpfaxi64dmac8b16", .data = &nbpf_cfg[NBPF8B16]},
+ {}
+};
+MODULE_DEVICE_TABLE(of, nbpf_match);
+
+static int nbpf_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id = of_match_device(nbpf_match, dev);
+ struct device_node *np = dev->of_node;
+ struct nbpf_device *nbpf;
+ struct dma_device *dma_dev;
+ struct resource *iomem, *irq_res;
+ const struct nbpf_config *cfg;
+ int num_channels;
+ int ret, irq, eirq, i;
+ int irqbuf[9] /* maximum 8 channels + error IRQ */;
+ unsigned int irqs = 0;
+
+ BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE);
+
+ /* DT only */
+ if (!np || !of_id || !of_id->data)
+ return -ENODEV;
+
+ cfg = of_id->data;
+ num_channels = cfg->num_channels;
+
+ nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels *
+ sizeof(nbpf->chan[0]), GFP_KERNEL);
+ if (!nbpf) {
+ dev_err(dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+ dma_dev = &nbpf->dma_dev;
+ dma_dev->dev = dev;
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nbpf->base = devm_ioremap_resource(dev, iomem);
+ if (IS_ERR(nbpf->base))
+ return PTR_ERR(nbpf->base);
+
+ nbpf->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(nbpf->clk))
+ return PTR_ERR(nbpf->clk);
+
+ nbpf->config = cfg;
+
+ for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ if (!irq_res)
+ break;
+
+ for (irq = irq_res->start; irq <= irq_res->end;
+ irq++, irqs++)
+ irqbuf[irqs] = irq;
+ }
+
+ /*
+ * 3 IRQ resource schemes are supported:
+ * 1. 1 shared IRQ for error and all channels
+ * 2. 2 IRQs: one for error and one shared for all channels
+ * 3. 1 IRQ for error and a separate IRQ for each channel
+ */
+ if (irqs != 1 && irqs != 2 && irqs != num_channels + 1)
+ return -ENXIO;
+
+ if (irqs == 1) {
+ eirq = irqbuf[0];
+
+ for (i = 0; i < num_channels; i++)
+ nbpf->chan[i].irq = irqbuf[0];
+ } else {
+ eirq = platform_get_irq_byname(pdev, "error");
+ if (eirq < 0)
+ return eirq;
+
+ if (irqs == num_channels + 1) {
+ struct nbpf_channel *chan;
+
+ for (i = 0, chan = nbpf->chan; i <= num_channels;
+ i++, chan++) {
+ /* Skip the error IRQ */
+ if (irqbuf[i] == eirq)
+ i++;
+ chan->irq = irqbuf[i];
+ }
+
+ if (chan != nbpf->chan + num_channels)
+ return -EINVAL;
+ } else {
+ /* 2 IRQs and more than one channel */
+ if (irqbuf[0] == eirq)
+ irq = irqbuf[1];
+ else
+ irq = irqbuf[0];
+
+ for (i = 0; i < num_channels; i++)
+ nbpf->chan[i].irq = irq;
+ }
+ }
+
+ ret = devm_request_irq(dev, eirq, nbpf_err_irq,
+ IRQF_SHARED, "dma error", nbpf);
+ if (ret < 0)
+ return ret;
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* Create DMA Channel */
+ for (i = 0; i < num_channels; i++) {
+ ret = nbpf_chan_probe(nbpf, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+ dma_cap_set(DMA_SG, dma_dev->cap_mask);
+
+ /* Common and MEMCPY operations */
+ dma_dev->device_alloc_chan_resources
+ = nbpf_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = nbpf_free_chan_resources;
+ dma_dev->device_prep_dma_sg = nbpf_prep_memcpy_sg;
+ dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
+ dma_dev->device_tx_status = nbpf_tx_status;
+ dma_dev->device_issue_pending = nbpf_issue_pending;
+ dma_dev->device_slave_caps = nbpf_slave_caps;
+
+ /*
+ * If we drop support for unaligned MEMCPY buffer addresses and / or
+ * lengths by setting
+ * dma_dev->copy_align = 4;
+ * then we can set transfer length to 4 bytes in nbpf_prep_one() for
+ * DMA_MEM_TO_MEM
+ */
+
+ /* Compulsory for DMA_SLAVE fields */
+ dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg;
+ dma_dev->device_control = nbpf_control;
+
+ platform_set_drvdata(pdev, nbpf);
+
+ ret = clk_prepare_enable(nbpf->clk);
+ if (ret < 0)
+ return ret;
+
+ nbpf_configure(nbpf);
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret < 0)
+ goto e_clk_off;
+
+ ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf);
+ if (ret < 0)
+ goto e_dma_dev_unreg;
+
+ return 0;
+
+e_dma_dev_unreg:
+ dma_async_device_unregister(dma_dev);
+e_clk_off:
+ clk_disable_unprepare(nbpf->clk);
+
+ return ret;
+}
+
+static int nbpf_remove(struct platform_device *pdev)
+{
+ struct nbpf_device *nbpf = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&nbpf->dma_dev);
+ clk_disable_unprepare(nbpf->clk);
+
+ return 0;
+}
+
+static struct platform_device_id nbpf_ids[] = {
+ {"nbpfaxi64dmac1b4", (kernel_ulong_t)&nbpf_cfg[NBPF1B4]},
+ {"nbpfaxi64dmac1b8", (kernel_ulong_t)&nbpf_cfg[NBPF1B8]},
+ {"nbpfaxi64dmac1b16", (kernel_ulong_t)&nbpf_cfg[NBPF1B16]},
+ {"nbpfaxi64dmac4b4", (kernel_ulong_t)&nbpf_cfg[NBPF4B4]},
+ {"nbpfaxi64dmac4b8", (kernel_ulong_t)&nbpf_cfg[NBPF4B8]},
+ {"nbpfaxi64dmac4b16", (kernel_ulong_t)&nbpf_cfg[NBPF4B16]},
+ {"nbpfaxi64dmac8b4", (kernel_ulong_t)&nbpf_cfg[NBPF8B4]},
+ {"nbpfaxi64dmac8b8", (kernel_ulong_t)&nbpf_cfg[NBPF8B8]},
+ {"nbpfaxi64dmac8b16", (kernel_ulong_t)&nbpf_cfg[NBPF8B16]},
+ {},
+};
+MODULE_DEVICE_TABLE(platform, nbpf_ids);
+
+#ifdef CONFIG_PM_RUNTIME
+static int nbpf_runtime_suspend(struct device *dev)
+{
+ struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
+ clk_disable_unprepare(nbpf->clk);
+ return 0;
+}
+
+static int nbpf_runtime_resume(struct device *dev)
+{
+ struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
+ return clk_prepare_enable(nbpf->clk);
+}
+#endif
+
+static const struct dev_pm_ops nbpf_pm_ops = {
+ SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
+};
+
+static struct platform_driver nbpf_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "dma-nbpf",
+ .of_match_table = nbpf_match,
+ .pm = &nbpf_pm_ops,
+ },
+ .id_table = nbpf_ids,
+ .probe = nbpf_probe,
+ .remove = nbpf_remove,
+};
+
+module_platform_driver(nbpf_driver);
+
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_DESCRIPTION("dmaengine driver for NBPFAXI64* DMACs");
+MODULE_LICENSE("GPL v2");
&dma_spec->args[0]);
}
EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
+
+/**
+ * of_dma_xlate_by_chan_id - Translate dt property to DMA channel by channel id
+ * @dma_spec: pointer to DMA specifier as found in the device tree
+ * @of_dma: pointer to DMA controller data
+ *
+ * This function can be used as the of xlate callback for a DMA driver which
+ * wants to match the channel based on the channel id. When using this xlate
+ * function the #dma-cells property of the DMA controller dt node needs to be
+ * set to 1.
+ * The data parameter of of_dma_controller_register must be a pointer to the
+ * dma_device struct the function should match upon.
+ *
+ * Returns pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dma_device *dev = ofdma->of_dma_data;
+ struct dma_chan *chan, *candidate = NULL;
+
+ if (!dev || dma_spec->args_count != 1)
+ return NULL;
+
+ list_for_each_entry(chan, &dev->channels, device_node)
+ if (chan->chan_id == dma_spec->args[0]) {
+ candidate = chan;
+ break;
+ }
+
+ if (!candidate)
+ return NULL;
+
+ return dma_get_slave_channel(candidate);
+}
+EXPORT_SYMBOL_GPL(of_dma_xlate_by_chan_id);
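
The mpc512x hunk earlier in this series shows this helper in use; in general a
driver registers it like this (sketch, assuming a populated struct dma_device
pointer 'dd'):

	ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id, dd);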
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
- void *context)
+ size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
{
struct omap_dmadev *od = to_omap_dma_dev(chan->device);
struct omap_chan *c = to_omap_dma_chan(chan);
#define PL330_MAX_IRQS 32
#define PL330_MAX_PERI 32
-enum pl330_srccachectrl {
- SCCTRL0, /* Noncacheable and nonbufferable */
- SCCTRL1, /* Bufferable only */
- SCCTRL2, /* Cacheable, but do not allocate */
- SCCTRL3, /* Cacheable and bufferable, but do not allocate */
- SINVALID1,
- SINVALID2,
- SCCTRL6, /* Cacheable write-through, allocate on reads only */
- SCCTRL7, /* Cacheable write-back, allocate on reads only */
-};
-
-enum pl330_dstcachectrl {
- DCCTRL0, /* Noncacheable and nonbufferable */
- DCCTRL1, /* Bufferable only */
- DCCTRL2, /* Cacheable, but do not allocate */
- DCCTRL3, /* Cacheable and bufferable, but do not allocate */
- DINVALID1, /* AWCACHE = 0x1000 */
- DINVALID2,
- DCCTRL6, /* Cacheable write-through, allocate on writes only */
- DCCTRL7, /* Cacheable write-back, allocate on writes only */
+enum pl330_cachectrl {
+ CCTRL0, /* Noncacheable and nonbufferable */
+ CCTRL1, /* Bufferable only */
+ CCTRL2, /* Cacheable, but do not allocate */
+ CCTRL3, /* Cacheable and bufferable, but do not allocate */
+ INVALID1, /* AWCACHE = 0x1000 */
+ INVALID2,
+ CCTRL6, /* Cacheable write-through, allocate on writes only */
+ CCTRL7, /* Cacheable write-back, allocate on writes only */
};
enum pl330_byteswap {
SWAP_16,
};
-enum pl330_reqtype {
- MEMTOMEM,
- MEMTODEV,
- DEVTOMEM,
- DEVTODEV,
-};
-
/* Register and Bit field Definitions */
#define DS 0x0
#define DS_ST_STOP 0x0
*/
#define MCODE_BUFF_PER_REQ 256
-/* If the _pl330_req is available to the client */
-#define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
-
/* Use this _only_ to wait on transient states */
#define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax();
u32 irq_ns;
};
-/* Handle to the DMAC provided to the PL330 core */
-struct pl330_info {
- /* Owning device */
- struct device *dev;
- /* Size of MicroCode buffers for each channel. */
- unsigned mcbufsz;
- /* ioremap'ed address of PL330 registers. */
- void __iomem *base;
- /* Client can freely use it. */
- void *client_data;
- /* PL330 core data, Client must not touch it. */
- void *pl330_data;
- /* Populated by the PL330 core driver during pl330_add */
- struct pl330_config pcfg;
- /*
- * If the DMAC has some reset mechanism, then the
- * client may want to provide pointer to the method.
- */
- void (*dmac_reset)(struct pl330_info *pi);
-};
-
/**
* Request Configuration.
* The PL330 core does not modify this and uses the last
unsigned brst_len:5;
unsigned brst_size:3; /* in power of 2 */
- enum pl330_dstcachectrl dcctl;
- enum pl330_srccachectrl scctl;
+ enum pl330_cachectrl dcctl;
+ enum pl330_cachectrl scctl;
enum pl330_byteswap swap;
struct pl330_config *pcfg;
};
u32 dst_addr;
/* Size to xfer */
u32 bytes;
- /*
- * Pointer to next xfer in the list.
- * The last xfer in the req must point to NULL.
- */
- struct pl330_xfer *next;
};
/* The xfer callbacks are made with one of these arguments. */
PL330_ERR_FAIL,
};
-/* A request defining Scatter-Gather List ending with NULL xfer. */
-struct pl330_req {
- enum pl330_reqtype rqtype;
- /* Index of peripheral for the xfer. */
- unsigned peri:5;
- /* Unique token for this xfer, set by the client. */
- void *token;
- /* Callback to be called after xfer. */
- void (*xfer_cb)(void *token, enum pl330_op_err err);
- /* If NULL, req will be done at last set parameters. */
- struct pl330_reqcfg *cfg;
- /* Pointer to first xfer in the request. */
- struct pl330_xfer *x;
- /* Hook to attach to DMAC's list of reqs with due callback */
- struct list_head rqd;
-};
-
-/*
- * To know the status of the channel and DMAC, the client
- * provides a pointer to this structure. The PL330 core
- * fills it with current information.
- */
-struct pl330_chanstatus {
- /*
- * If the DMAC engine halted due to some error,
- * the client should remove-add DMAC.
- */
- bool dmac_halted;
- /*
- * If channel is halted due to some error,
- * the client should ABORT/FLUSH and START the channel.
- */
- bool faulting;
- /* Location of last load */
- u32 src_addr;
- /* Location of last store */
- u32 dst_addr;
- /*
- * Pointer to the currently active req, NULL if channel is
- * inactive, even though the requests may be present.
- */
- struct pl330_req *top_req;
- /* Pointer to req waiting second in the queue if any. */
- struct pl330_req *wait_req;
-};
-
-enum pl330_chan_op {
- /* Start the channel */
- PL330_OP_START,
- /* Abort the active xfer */
- PL330_OP_ABORT,
- /* Stop xfer and flush queue */
- PL330_OP_FLUSH,
-};
-
-struct _xfer_spec {
- u32 ccr;
- struct pl330_req *r;
- struct pl330_xfer *x;
-};
-
enum dmamov_dst {
SAR = 0,
CCR,
ALWAYS,
};
+struct dma_pl330_desc;
+
struct _pl330_req {
u32 mc_bus;
void *mc_cpu;
- /* Number of bytes taken to setup MC for the req */
- u32 mc_len;
- struct pl330_req *r;
+ struct dma_pl330_desc *desc;
};
/* ToBeDone for tasklet */
DYING,
};
-/* A DMAC */
-struct pl330_dmac {
- spinlock_t lock;
- /* Holds list of reqs with due callbacks */
- struct list_head req_done;
- /* Pointer to platform specific stuff */
- struct pl330_info *pinfo;
- /* Maximum possible events/irqs */
- int events[32];
- /* BUS address of MicroCode buffer */
- dma_addr_t mcode_bus;
- /* CPU address of MicroCode buffer */
- void *mcode_cpu;
- /* List of all Channel threads */
- struct pl330_thread *channels;
- /* Pointer to the MANAGER thread */
- struct pl330_thread *manager;
- /* To handle bad news in interrupt */
- struct tasklet_struct tasks;
- struct _pl330_tbd dmac_tbd;
- /* State of DMAC operation */
- enum pl330_dmac_state state;
-};
-
enum desc_status {
/* In the DMAC pool */
FREE,
* As the parent, this DMAC also provides descriptors
* to the channel.
*/
- struct dma_pl330_dmac *dmac;
+ struct pl330_dmac *dmac;
/* To protect channel manipulation */
spinlock_t lock;
- /* Token of a hardware channel thread of PL330 DMAC
- * NULL if the channel is available to be acquired.
+ /*
+ * Hardware channel thread of PL330 DMAC. NULL if the channel is
+ * available.
*/
- void *pl330_chid;
+ struct pl330_thread *thread;
/* For D-to-M and M-to-D channels */
int burst_sz; /* the peripheral fifo width */
bool cyclic;
};
-struct dma_pl330_dmac {
- struct pl330_info pif;
-
+struct pl330_dmac {
/* DMA-Engine Device */
struct dma_device ddma;
/* To protect desc_pool manipulation */
spinlock_t pool_lock;
+ /* Size of MicroCode buffers for each channel. */
+ unsigned mcbufsz;
+ /* ioremap'ed address of PL330 registers. */
+ void __iomem *base;
+ /* Populated by the PL330 core driver during pl330_add */
+ struct pl330_config pcfg;
+
+ spinlock_t lock;
+ /* Maximum possible events/irqs */
+ int events[32];
+ /* BUS address of MicroCode buffer */
+ dma_addr_t mcode_bus;
+ /* CPU address of MicroCode buffer */
+ void *mcode_cpu;
+ /* List of all Channel threads */
+ struct pl330_thread *channels;
+ /* Pointer to the MANAGER thread */
+ struct pl330_thread *manager;
+ /* To handle bad news in interrupt */
+ struct tasklet_struct tasks;
+ struct _pl330_tbd dmac_tbd;
+ /* State of DMAC operation */
+ enum pl330_dmac_state state;
+ /* Holds list of reqs with due callbacks */
+ struct list_head req_done;
+
/* Peripheral channels connected to this DMAC */
unsigned int num_peripherals;
struct dma_pl330_chan *peripherals; /* keep at end */
struct pl330_xfer px;
struct pl330_reqcfg rqcfg;
- struct pl330_req req;
enum desc_status status;
/* The channel which currently holds this desc */
struct dma_pl330_chan *pchan;
+
+ enum dma_transfer_direction rqtype;
+ /* Index of peripheral for the xfer. */
+ unsigned peri:5;
+ /* Hook to attach to DMAC's list of reqs with due callback */
+ struct list_head rqd;
};
-static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
-{
- if (r && r->xfer_cb)
- r->xfer_cb(r->token, err);
-}
+struct _xfer_spec {
+ u32 ccr;
+ struct dma_pl330_desc *desc;
+};
static inline bool _queue_empty(struct pl330_thread *thrd)
{
- return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
- ? true : false;
+ return thrd->req[0].desc == NULL && thrd->req[1].desc == NULL;
}
static inline bool _queue_full(struct pl330_thread *thrd)
{
- return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
- ? false : true;
+ return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
}
static inline bool is_manager(struct pl330_thread *thrd)
{
- struct pl330_dmac *pl330 = thrd->dmac;
-
- /* MANAGER is indexed at the end */
- if (thrd->id == pl330->pinfo->pcfg.num_chan)
- return true;
- else
- return false;
+ return thrd->dmac->manager == thrd;
}
/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
- struct pl330_dmac *pl330 = thrd->dmac;
-
- return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
+ return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false;
}
static inline u32 get_revision(u32 periph_id)
/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
- void __iomem *regs = thrd->dmac->pinfo->base;
+ void __iomem *regs = thrd->dmac->base;
unsigned long loops = msecs_to_loops(5);
do {
static inline void _execute_DBGINSN(struct pl330_thread *thrd,
u8 insn[], bool as_manager)
{
- void __iomem *regs = thrd->dmac->pinfo->base;
+ void __iomem *regs = thrd->dmac->base;
u32 val;
val = (insn[0] << 16) | (insn[1] << 24);
/* If timed out due to halted state-machine */
if (_until_dmac_idle(thrd)) {
- dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
+ dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n");
return;
}
writel(0, regs + DBGCMD);
}
-/*
- * Mark a _pl330_req as free.
- * We do it by writing DMAEND as the first instruction
- * because no valid request is going to have DMAEND as
- * its first instruction to execute.
- */
-static void mark_free(struct pl330_thread *thrd, int idx)
-{
- struct _pl330_req *req = &thrd->req[idx];
-
- _emit_END(0, req->mc_cpu);
- req->mc_len = 0;
-
- thrd->req_running = -1;
-}
-
static inline u32 _state(struct pl330_thread *thrd)
{
- void __iomem *regs = thrd->dmac->pinfo->base;
+ void __iomem *regs = thrd->dmac->base;
u32 val;
if (is_manager(thrd))
static void _stop(struct pl330_thread *thrd)
{
- void __iomem *regs = thrd->dmac->pinfo->base;
+ void __iomem *regs = thrd->dmac->base;
u8 insn[6] = {0, 0, 0, 0, 0, 0};
if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
- void __iomem *regs = thrd->dmac->pinfo->base;
+ void __iomem *regs = thrd->dmac->base;
struct _pl330_req *req;
- struct pl330_req *r;
+ struct dma_pl330_desc *desc;
struct _arg_GO go;
unsigned ns;
u8 insn[6] = {0, 0, 0, 0, 0, 0};
return true;
idx = 1 - thrd->lstenq;
- if (!IS_FREE(&thrd->req[idx]))
+ if (thrd->req[idx].desc != NULL) {
req = &thrd->req[idx];
- else {
+ } else {
idx = thrd->lstenq;
- if (!IS_FREE(&thrd->req[idx]))
+ if (thrd->req[idx].desc != NULL)
req = &thrd->req[idx];
else
req = NULL;
}
/* Return if no request */
- if (!req || !req->r)
+ if (!req)
return true;
- r = req->r;
+ desc = req->desc;
- if (r->cfg)
- ns = r->cfg->nonsecure ? 1 : 0;
- else if (readl(regs + CS(thrd->id)) & CS_CNS)
- ns = 1;
- else
- ns = 0;
+ ns = desc->rqcfg.nonsecure ? 1 : 0;
/* See 'Abort Sources' point-4 at Page 2-25 */
if (_manager_ns(thrd) && !ns)
- dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
+ dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n",
__func__, __LINE__);
go.chan = thrd->id;
const struct _xfer_spec *pxs, int cyc)
{
int off = 0;
- struct pl330_config *pcfg = pxs->r->cfg->pcfg;
+ struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg;
/* check lock-up free version */
if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
int off = 0;
while (cyc--) {
- off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
- off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
+ off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
+ off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
off += _emit_ST(dry_run, &buf[off], ALWAYS);
- off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
+ off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
}
return off;
int off = 0;
while (cyc--) {
- off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
+ off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
off += _emit_LD(dry_run, &buf[off], ALWAYS);
- off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
- off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
+ off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
+ off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
}
return off;
{
int off = 0;
- switch (pxs->r->rqtype) {
- case MEMTODEV:
+ switch (pxs->desc->rqtype) {
+ case DMA_MEM_TO_DEV:
off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
break;
- case DEVTOMEM:
+ case DMA_DEV_TO_MEM:
off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
break;
- case MEMTOMEM:
+ case DMA_MEM_TO_MEM:
off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
break;
default:
static inline int _setup_loops(unsigned dry_run, u8 buf[],
const struct _xfer_spec *pxs)
{
- struct pl330_xfer *x = pxs->x;
+ struct pl330_xfer *x = &pxs->desc->px;
u32 ccr = pxs->ccr;
unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
int off = 0;
static inline int _setup_xfer(unsigned dry_run, u8 buf[],
const struct _xfer_spec *pxs)
{
- struct pl330_xfer *x = pxs->x;
+ struct pl330_xfer *x = &pxs->desc->px;
int off = 0;
/* DMAMOV SAR, x->src_addr */
/* DMAMOV CCR, ccr */
off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
- x = pxs->r->x;
- do {
- /* Error if xfer length is not aligned at burst size */
- if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
- return -EINVAL;
-
- pxs->x = x;
- off += _setup_xfer(dry_run, &buf[off], pxs);
+ x = &pxs->desc->px;
+ /* Error if xfer length is not aligned at burst size */
+ if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
+ return -EINVAL;
- x = x->next;
- } while (x);
+ off += _setup_xfer(dry_run, &buf[off], pxs);
/* DMASEV peripheral/event */
off += _emit_SEV(dry_run, &buf[off], thrd->ev);
return ccr;
}
-static inline bool _is_valid(u32 ccr)
-{
- enum pl330_dstcachectrl dcctl;
- enum pl330_srccachectrl scctl;
-
- dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
- scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;
-
- if (dcctl == DINVALID1 || dcctl == DINVALID2
- || scctl == SINVALID1 || scctl == SINVALID2)
- return false;
- else
- return true;
-}
-
/*
* Submit a list of xfers after which the client wants notification.
* Client is not notified after each xfer unit, just once after all
* xfer units are done or some error occurs.
*/
-static int pl330_submit_req(void *ch_id, struct pl330_req *r)
+static int pl330_submit_req(struct pl330_thread *thrd,
+ struct dma_pl330_desc *desc)
{
- struct pl330_thread *thrd = ch_id;
- struct pl330_dmac *pl330;
- struct pl330_info *pi;
+ struct pl330_dmac *pl330 = thrd->dmac;
struct _xfer_spec xs;
unsigned long flags;
void __iomem *regs;
int ret = 0;
/* No Req or Unacquired Channel or DMAC */
- if (!r || !thrd || thrd->free)
+ if (!desc || !thrd || thrd->free)
return -EINVAL;
- pl330 = thrd->dmac;
- pi = pl330->pinfo;
- regs = pi->base;
+ regs = thrd->dmac->base;
if (pl330->state == DYING
|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
- dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
+ dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
__func__, __LINE__);
return -EAGAIN;
}
/* If request for non-existing peripheral */
- if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
- dev_info(thrd->dmac->pinfo->dev,
+ if (desc->rqtype != DMA_MEM_TO_MEM &&
+ desc->peri >= pl330->pcfg.num_peri) {
+ dev_info(thrd->dmac->ddma.dev,
"%s:%d Invalid peripheral(%u)!\n",
- __func__, __LINE__, r->peri);
+ __func__, __LINE__, desc->peri);
return -EINVAL;
}
goto xfer_exit;
}
+ /* Prefer Secure Channel */
+ if (!_manager_ns(thrd))
+ desc->rqcfg.nonsecure = 0;
+ else
+ desc->rqcfg.nonsecure = 1;
- /* Use last settings, if not provided */
- if (r->cfg) {
- /* Prefer Secure Channel */
- if (!_manager_ns(thrd))
- r->cfg->nonsecure = 0;
- else
- r->cfg->nonsecure = 1;
-
- ccr = _prepare_ccr(r->cfg);
- } else {
- ccr = readl(regs + CC(thrd->id));
- }
-
- /* If this req doesn't have valid xfer settings */
- if (!_is_valid(ccr)) {
- ret = -EINVAL;
- dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
- __func__, __LINE__, ccr);
- goto xfer_exit;
- }
+ ccr = _prepare_ccr(&desc->rqcfg);
- idx = IS_FREE(&thrd->req[0]) ? 0 : 1;
+ idx = thrd->req[0].desc == NULL ? 0 : 1;
xs.ccr = ccr;
- xs.r = r;
+ xs.desc = desc;
/* First dry run to check if req is acceptable */
ret = _setup_req(1, thrd, idx, &xs);
if (ret < 0)
goto xfer_exit;
- if (ret > pi->mcbufsz / 2) {
- dev_info(thrd->dmac->pinfo->dev,
- "%s:%d Trying increasing mcbufsz\n",
+ if (ret > pl330->mcbufsz / 2) {
+ dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz\n",
__func__, __LINE__);
ret = -ENOMEM;
goto xfer_exit;
/* Hook the request */
thrd->lstenq = idx;
- thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
- thrd->req[idx].r = r;
+ thrd->req[idx].desc = desc;
+ _setup_req(0, thrd, idx, &xs);
ret = 0;
return ret;
}
+static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
+{
+ struct dma_pl330_chan *pch;
+ unsigned long flags;
+
+ if (!desc)
+ return;
+
+ pch = desc->pchan;
+
+ /* If desc aborted */
+ if (!pch)
+ return;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ desc->status = DONE;
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ tasklet_schedule(&pch->task);
+}
+
static void pl330_dotask(unsigned long data)
{
struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
- struct pl330_info *pi = pl330->pinfo;
unsigned long flags;
int i;
if (pl330->dmac_tbd.reset_mngr) {
_stop(pl330->manager);
/* Reset all channels */
- pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
+ pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1;
/* Clear the reset flag */
pl330->dmac_tbd.reset_mngr = false;
}
- for (i = 0; i < pi->pcfg.num_chan; i++) {
+ for (i = 0; i < pl330->pcfg.num_chan; i++) {
if (pl330->dmac_tbd.reset_chan & (1 << i)) {
struct pl330_thread *thrd = &pl330->channels[i];
- void __iomem *regs = pi->base;
+ void __iomem *regs = pl330->base;
enum pl330_op_err err;
_stop(thrd);
err = PL330_ERR_ABORT;
spin_unlock_irqrestore(&pl330->lock, flags);
-
- _callback(thrd->req[1 - thrd->lstenq].r, err);
- _callback(thrd->req[thrd->lstenq].r, err);
-
+ dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err);
+ dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err);
spin_lock_irqsave(&pl330->lock, flags);
- thrd->req[0].r = NULL;
- thrd->req[1].r = NULL;
- mark_free(thrd, 0);
- mark_free(thrd, 1);
+ thrd->req[0].desc = NULL;
+ thrd->req[1].desc = NULL;
+ thrd->req_running = -1;
/* Clear the reset flag */
pl330->dmac_tbd.reset_chan &= ~(1 << i);
}
/* Returns 1 if state was updated, 0 otherwise */
-static int pl330_update(const struct pl330_info *pi)
+static int pl330_update(struct pl330_dmac *pl330)
{
- struct pl330_req *rqdone, *tmp;
- struct pl330_dmac *pl330;
+ struct dma_pl330_desc *descdone, *tmp;
unsigned long flags;
void __iomem *regs;
u32 val;
int id, ev, ret = 0;
- if (!pi || !pi->pl330_data)
- return 0;
-
- regs = pi->base;
- pl330 = pi->pl330_data;
+ regs = pl330->base;
spin_lock_irqsave(&pl330->lock, flags);
else
pl330->dmac_tbd.reset_mngr = false;
- val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
+ val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1);
pl330->dmac_tbd.reset_chan |= val;
if (val) {
int i = 0;
- while (i < pi->pcfg.num_chan) {
+ while (i < pl330->pcfg.num_chan) {
if (val & (1 << i)) {
- dev_info(pi->dev,
+ dev_info(pl330->ddma.dev,
"Reset Channel-%d\t CS-%x FTC-%x\n",
i, readl(regs + CS(i)),
readl(regs + FTC(i)));
/* Check which event happened i.e, thread notified */
val = readl(regs + ES);
- if (pi->pcfg.num_events < 32
- && val & ~((1 << pi->pcfg.num_events) - 1)) {
+ if (pl330->pcfg.num_events < 32
+ && val & ~((1 << pl330->pcfg.num_events) - 1)) {
pl330->dmac_tbd.reset_dmac = true;
- dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
+ dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__,
+ __LINE__);
ret = 1;
goto updt_exit;
}
- for (ev = 0; ev < pi->pcfg.num_events; ev++) {
+ for (ev = 0; ev < pl330->pcfg.num_events; ev++) {
if (val & (1 << ev)) { /* Event occurred */
struct pl330_thread *thrd;
u32 inten = readl(regs + INTEN);
continue;
/* Detach the req */
- rqdone = thrd->req[active].r;
- thrd->req[active].r = NULL;
-
- mark_free(thrd, active);
+ descdone = thrd->req[active].desc;
+ thrd->req[active].desc = NULL;
/* Get going again ASAP */
_start(thrd);
/* For now, just make a list of callbacks to be done */
- list_add_tail(&rqdone->rqd, &pl330->req_done);
+ list_add_tail(&descdone->rqd, &pl330->req_done);
}
}
/* Now that we are in no hurry, do the callbacks */
- list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
- list_del(&rqdone->rqd);
-
+ list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) {
+ list_del(&descdone->rqd);
spin_unlock_irqrestore(&pl330->lock, flags);
- _callback(rqdone, PL330_ERR_NONE);
+ dma_pl330_rqcb(descdone, PL330_ERR_NONE);
spin_lock_irqsave(&pl330->lock, flags);
}
return ret;
}
-static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
-{
- struct pl330_thread *thrd = ch_id;
- struct pl330_dmac *pl330;
- unsigned long flags;
- int ret = 0, active;
-
- if (!thrd || thrd->free || thrd->dmac->state == DYING)
- return -EINVAL;
-
- pl330 = thrd->dmac;
- active = thrd->req_running;
-
- spin_lock_irqsave(&pl330->lock, flags);
-
- switch (op) {
- case PL330_OP_FLUSH:
- /* Make sure the channel is stopped */
- _stop(thrd);
-
- thrd->req[0].r = NULL;
- thrd->req[1].r = NULL;
- mark_free(thrd, 0);
- mark_free(thrd, 1);
- break;
-
- case PL330_OP_ABORT:
- /* Make sure the channel is stopped */
- _stop(thrd);
-
- /* ABORT is only for the active req */
- if (active == -1)
- break;
-
- thrd->req[active].r = NULL;
- mark_free(thrd, active);
-
- /* Start the next */
- case PL330_OP_START:
- if ((active == -1) && !_start(thrd))
- ret = -EIO;
- break;
-
- default:
- ret = -EINVAL;
- }
-
- spin_unlock_irqrestore(&pl330->lock, flags);
- return ret;
-}
-
/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
struct pl330_dmac *pl330 = thrd->dmac;
- struct pl330_info *pi = pl330->pinfo;
int ev;
- for (ev = 0; ev < pi->pcfg.num_events; ev++)
+ for (ev = 0; ev < pl330->pcfg.num_events; ev++)
if (pl330->events[ev] == -1) {
pl330->events[ev] = thrd->id;
return ev;
return -1;
}
-static bool _chan_ns(const struct pl330_info *pi, int i)
+static bool _chan_ns(const struct pl330_dmac *pl330, int i)
{
- return pi->pcfg.irq_ns & (1 << i);
+ return pl330->pcfg.irq_ns & (1 << i);
}
/* Upon success, returns the allocated channel
 * thread, NULL otherwise.
*/
-static void *pl330_request_channel(const struct pl330_info *pi)
+static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
struct pl330_thread *thrd = NULL;
- struct pl330_dmac *pl330;
unsigned long flags;
int chans, i;
- if (!pi || !pi->pl330_data)
- return NULL;
-
- pl330 = pi->pl330_data;
-
if (pl330->state == DYING)
return NULL;
- chans = pi->pcfg.num_chan;
+ chans = pl330->pcfg.num_chan;
spin_lock_irqsave(&pl330->lock, flags);
for (i = 0; i < chans; i++) {
thrd = &pl330->channels[i];
if ((thrd->free) && (!_manager_ns(thrd) ||
- _chan_ns(pi, i))) {
+ _chan_ns(pl330, i))) {
thrd->ev = _alloc_event(thrd);
if (thrd->ev >= 0) {
thrd->free = false;
thrd->lstenq = 1;
- thrd->req[0].r = NULL;
- mark_free(thrd, 0);
- thrd->req[1].r = NULL;
- mark_free(thrd, 1);
+ thrd->req[0].desc = NULL;
+ thrd->req[1].desc = NULL;
+ thrd->req_running = -1;
break;
}
}
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
struct pl330_dmac *pl330 = thrd->dmac;
- struct pl330_info *pi = pl330->pinfo;
/* If the event is valid and was held by the thread */
- if (ev >= 0 && ev < pi->pcfg.num_events
+ if (ev >= 0 && ev < pl330->pcfg.num_events
&& pl330->events[ev] == thrd->id)
pl330->events[ev] = -1;
}
-static void pl330_release_channel(void *ch_id)
+static void pl330_release_channel(struct pl330_thread *thrd)
{
- struct pl330_thread *thrd = ch_id;
struct pl330_dmac *pl330;
unsigned long flags;
_stop(thrd);
- _callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
- _callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);
+ dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
+ dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);
pl330 = thrd->dmac;
/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make the best use of the DMAC
*/
-static void read_dmac_config(struct pl330_info *pi)
+static void read_dmac_config(struct pl330_dmac *pl330)
{
- void __iomem *regs = pi->base;
+ void __iomem *regs = pl330->base;
u32 val;
val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
val &= CRD_DATA_WIDTH_MASK;
- pi->pcfg.data_bus_width = 8 * (1 << val);
+ pl330->pcfg.data_bus_width = 8 * (1 << val);
val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
val &= CRD_DATA_BUFF_MASK;
- pi->pcfg.data_buf_dep = val + 1;
+ pl330->pcfg.data_buf_dep = val + 1;
val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
val &= CR0_NUM_CHANS_MASK;
val += 1;
- pi->pcfg.num_chan = val;
+ pl330->pcfg.num_chan = val;
val = readl(regs + CR0);
if (val & CR0_PERIPH_REQ_SET) {
val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
val += 1;
- pi->pcfg.num_peri = val;
- pi->pcfg.peri_ns = readl(regs + CR4);
+ pl330->pcfg.num_peri = val;
+ pl330->pcfg.peri_ns = readl(regs + CR4);
} else {
- pi->pcfg.num_peri = 0;
+ pl330->pcfg.num_peri = 0;
}
val = readl(regs + CR0);
if (val & CR0_BOOT_MAN_NS)
- pi->pcfg.mode |= DMAC_MODE_NS;
+ pl330->pcfg.mode |= DMAC_MODE_NS;
else
- pi->pcfg.mode &= ~DMAC_MODE_NS;
+ pl330->pcfg.mode &= ~DMAC_MODE_NS;
val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
val &= CR0_NUM_EVENTS_MASK;
val += 1;
- pi->pcfg.num_events = val;
+ pl330->pcfg.num_events = val;
- pi->pcfg.irq_ns = readl(regs + CR3);
+ pl330->pcfg.irq_ns = readl(regs + CR3);
}
static inline void _reset_thread(struct pl330_thread *thrd)
{
struct pl330_dmac *pl330 = thrd->dmac;
- struct pl330_info *pi = pl330->pinfo;
thrd->req[0].mc_cpu = pl330->mcode_cpu
- + (thrd->id * pi->mcbufsz);
+ + (thrd->id * pl330->mcbufsz);
thrd->req[0].mc_bus = pl330->mcode_bus
- + (thrd->id * pi->mcbufsz);
- thrd->req[0].r = NULL;
- mark_free(thrd, 0);
+ + (thrd->id * pl330->mcbufsz);
+ thrd->req[0].desc = NULL;
thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
- + pi->mcbufsz / 2;
+ + pl330->mcbufsz / 2;
thrd->req[1].mc_bus = thrd->req[0].mc_bus
- + pi->mcbufsz / 2;
- thrd->req[1].r = NULL;
- mark_free(thrd, 1);
+ + pl330->mcbufsz / 2;
+ thrd->req[1].desc = NULL;
+
+ thrd->req_running = -1;
}
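
The assignments above fix the per-thread microcode layout: each channel thread owns one mcbufsz slice of the DMAC-wide allocation, split in half between its two request slots. As a sketch:

	/*
	 * Per-thread microcode layout after _reset_thread():
	 *
	 *   req[0].mc_cpu = mcode_cpu + thrd->id * mcbufsz
	 *   req[1].mc_cpu = req[0].mc_cpu + mcbufsz / 2
	 *
	 * so each of the two slots can hold at most mcbufsz / 2 bytes of
	 * generated microcode, which is what the dry-run size check in
	 * pl330_submit_req() enforces.
	 */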
static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
- struct pl330_info *pi = pl330->pinfo;
- int chans = pi->pcfg.num_chan;
+ int chans = pl330->pcfg.num_chan;
struct pl330_thread *thrd;
int i;
static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
- struct pl330_info *pi = pl330->pinfo;
- int chans = pi->pcfg.num_chan;
+ int chans = pl330->pcfg.num_chan;
int ret;
/*
* Alloc MicroCode buffer for 'chans' Channel threads.
* A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
*/
- pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
- chans * pi->mcbufsz,
+ pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev,
+ chans * pl330->mcbufsz,
&pl330->mcode_bus, GFP_KERNEL);
if (!pl330->mcode_cpu) {
- dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
+ dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
__func__, __LINE__);
return -ENOMEM;
}
ret = dmac_alloc_threads(pl330);
if (ret) {
- dev_err(pi->dev, "%s:%d Can't to create channels for DMAC!\n",
+ dev_err(pl330->ddma.dev, "%s:%d Can't to create channels for DMAC!\n",
__func__, __LINE__);
- dma_free_coherent(pi->dev,
- chans * pi->mcbufsz,
+ dma_free_coherent(pl330->ddma.dev,
+ chans * pl330->mcbufsz,
pl330->mcode_cpu, pl330->mcode_bus);
return ret;
}
return 0;
}
-static int pl330_add(struct pl330_info *pi)
+static int pl330_add(struct pl330_dmac *pl330)
{
- struct pl330_dmac *pl330;
void __iomem *regs;
int i, ret;
- if (!pi || !pi->dev)
- return -EINVAL;
-
- /* If already added */
- if (pi->pl330_data)
- return -EINVAL;
-
- /*
- * If the SoC can perform reset on the DMAC, then do it
- * before reading its configuration.
- */
- if (pi->dmac_reset)
- pi->dmac_reset(pi);
-
- regs = pi->base;
+ regs = pl330->base;
/* Check if we can handle this DMAC */
- if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
- dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id);
+ if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
+ dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
+ pl330->pcfg.periph_id);
return -EINVAL;
}
/* Read the configuration of the DMAC */
- read_dmac_config(pi);
+ read_dmac_config(pl330);
- if (pi->pcfg.num_events == 0) {
- dev_err(pi->dev, "%s:%d Can't work without events!\n",
+ if (pl330->pcfg.num_events == 0) {
+ dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n",
__func__, __LINE__);
return -EINVAL;
}
- pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
- if (!pl330) {
- dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
-
- /* Assign the info structure and private data */
- pl330->pinfo = pi;
- pi->pl330_data = pl330;
-
spin_lock_init(&pl330->lock);
INIT_LIST_HEAD(&pl330->req_done);
/* Use default MC buffer size if not provided */
- if (!pi->mcbufsz)
- pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;
+ if (!pl330->mcbufsz)
+ pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2;
/* Mark all events as free */
- for (i = 0; i < pi->pcfg.num_events; i++)
+ for (i = 0; i < pl330->pcfg.num_events; i++)
pl330->events[i] = -1;
/* Allocate resources needed by the DMAC */
ret = dmac_alloc_resources(pl330);
if (ret) {
- dev_err(pi->dev, "Unable to create channels for DMAC\n");
- kfree(pl330);
+ dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n");
return ret;
}
static int dmac_free_threads(struct pl330_dmac *pl330)
{
- struct pl330_info *pi = pl330->pinfo;
- int chans = pi->pcfg.num_chan;
struct pl330_thread *thrd;
int i;
/* Release Channel threads */
- for (i = 0; i < chans; i++) {
+ for (i = 0; i < pl330->pcfg.num_chan; i++) {
thrd = &pl330->channels[i];
- pl330_release_channel((void *)thrd);
+ pl330_release_channel(thrd);
}
/* Free memory */
return 0;
}
-static void dmac_free_resources(struct pl330_dmac *pl330)
+static void pl330_del(struct pl330_dmac *pl330)
{
- struct pl330_info *pi = pl330->pinfo;
- int chans = pi->pcfg.num_chan;
-
- dmac_free_threads(pl330);
-
- dma_free_coherent(pi->dev, chans * pi->mcbufsz,
- pl330->mcode_cpu, pl330->mcode_bus);
-}
-
-static void pl330_del(struct pl330_info *pi)
-{
- struct pl330_dmac *pl330;
-
- if (!pi || !pi->pl330_data)
- return;
-
- pl330 = pi->pl330_data;
-
pl330->state = UNINIT;
tasklet_kill(&pl330->tasks);
/* Free DMAC resources */
- dmac_free_resources(pl330);
+ dmac_free_threads(pl330);
- kfree(pl330);
- pi->pl330_data = NULL;
+ dma_free_coherent(pl330->ddma.dev,
+ pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
+ pl330->mcode_bus);
}
/* forward declaration */
if (desc->status == BUSY)
continue;
- ret = pl330_submit_req(pch->pl330_chid,
- &desc->req);
+ ret = pl330_submit_req(pch->thread, desc);
if (!ret) {
desc->status = BUSY;
} else if (ret == -EAGAIN) {
} else {
/* Unacceptable request */
desc->status = DONE;
- dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
+ dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n",
__func__, __LINE__, desc->txd.cookie);
tasklet_schedule(&pch->task);
}
fill_queue(pch);
/* Make sure the PL330 Channel thread is active */
- pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
+ spin_lock(&pch->thread->dmac->lock);
+ _start(pch->thread);
+ spin_unlock(&pch->thread->dmac->lock);
while (!list_empty(&pch->completed_list)) {
dma_async_tx_callback callback;
spin_unlock_irqrestore(&pch->lock, flags);
}
-static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
-{
- struct dma_pl330_desc *desc = token;
- struct dma_pl330_chan *pch = desc->pchan;
- unsigned long flags;
-
- /* If desc aborted */
- if (!pch)
- return;
-
- spin_lock_irqsave(&pch->lock, flags);
-
- desc->status = DONE;
-
- spin_unlock_irqrestore(&pch->lock, flags);
-
- tasklet_schedule(&pch->task);
-}
-
bool pl330_filter(struct dma_chan *chan, void *param)
{
u8 *peri_id;
struct of_dma *ofdma)
{
int count = dma_spec->args_count;
- struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
+ struct pl330_dmac *pl330 = ofdma->of_dma_data;
unsigned int chan_id;
+ if (!pl330)
+ return NULL;
+
if (count != 1)
return NULL;
chan_id = dma_spec->args[0];
- if (chan_id >= pdmac->num_peripherals)
+ if (chan_id >= pl330->num_peripherals)
return NULL;
- return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan);
+ return dma_get_slave_channel(&pl330->peripherals[chan_id].chan);
}
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_dmac *pdmac = pch->dmac;
+ struct pl330_dmac *pl330 = pch->dmac;
unsigned long flags;
spin_lock_irqsave(&pch->lock, flags);
dma_cookie_init(chan);
pch->cyclic = false;
- pch->pl330_chid = pl330_request_channel(&pdmac->pif);
- if (!pch->pl330_chid) {
+ pch->thread = pl330_request_channel(pl330);
+ if (!pch->thread) {
spin_unlock_irqrestore(&pch->lock, flags);
return -ENOMEM;
}
struct dma_pl330_chan *pch = to_pchan(chan);
struct dma_pl330_desc *desc;
unsigned long flags;
- struct dma_pl330_dmac *pdmac = pch->dmac;
+ struct pl330_dmac *pl330 = pch->dmac;
struct dma_slave_config *slave_config;
LIST_HEAD(list);
case DMA_TERMINATE_ALL:
spin_lock_irqsave(&pch->lock, flags);
- /* FLUSH the PL330 Channel thread */
- pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
+ spin_lock(&pl330->lock);
+ _stop(pch->thread);
+ spin_unlock(&pl330->lock);
+
+ pch->thread->req[0].desc = NULL;
+ pch->thread->req[1].desc = NULL;
+ pch->thread->req_running = -1;
/* Mark all desc done */
list_for_each_entry(desc, &pch->submitted_list, node) {
dma_cookie_complete(&desc->txd);
}
- list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool);
- list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
- list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
+ list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
+ list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
+ list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
break;
case DMA_SLAVE_CONFIG:
}
break;
default:
- dev_err(pch->dmac->pif.dev, "Not supported command.\n");
+ dev_err(pch->dmac->ddma.dev, "Not supported command.\n");
return -ENXIO;
}
spin_lock_irqsave(&pch->lock, flags);
- pl330_release_channel(pch->pl330_chid);
- pch->pl330_chid = NULL;
+ pl330_release_channel(pch->thread);
+ pch->thread = NULL;
if (pch->cyclic)
list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
static inline void _init_desc(struct dma_pl330_desc *desc)
{
- desc->req.x = &desc->px;
- desc->req.token = desc;
desc->rqcfg.swap = SWAP_NO;
- desc->rqcfg.scctl = SCCTRL0;
- desc->rqcfg.dcctl = DCCTRL0;
- desc->req.cfg = &desc->rqcfg;
- desc->req.xfer_cb = dma_pl330_rqcb;
+ desc->rqcfg.scctl = CCTRL0;
+ desc->rqcfg.dcctl = CCTRL0;
desc->txd.tx_submit = pl330_tx_submit;
INIT_LIST_HEAD(&desc->node);
}
/* Returns the number of descriptors added to the DMAC pool */
-static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
+static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count)
{
struct dma_pl330_desc *desc;
unsigned long flags;
int i;
- if (!pdmac)
- return 0;
-
desc = kcalloc(count, sizeof(*desc), flg);
if (!desc)
return 0;
- spin_lock_irqsave(&pdmac->pool_lock, flags);
+ spin_lock_irqsave(&pl330->pool_lock, flags);
for (i = 0; i < count; i++) {
_init_desc(&desc[i]);
- list_add_tail(&desc[i].node, &pdmac->desc_pool);
+ list_add_tail(&desc[i].node, &pl330->desc_pool);
}
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+ spin_unlock_irqrestore(&pl330->pool_lock, flags);
return count;
}
-static struct dma_pl330_desc *
-pluck_desc(struct dma_pl330_dmac *pdmac)
+static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330)
{
struct dma_pl330_desc *desc = NULL;
unsigned long flags;
- if (!pdmac)
- return NULL;
-
- spin_lock_irqsave(&pdmac->pool_lock, flags);
+ spin_lock_irqsave(&pl330->pool_lock, flags);
- if (!list_empty(&pdmac->desc_pool)) {
- desc = list_entry(pdmac->desc_pool.next,
+ if (!list_empty(&pl330->desc_pool)) {
+ desc = list_entry(pl330->desc_pool.next,
struct dma_pl330_desc, node);
list_del_init(&desc->node);
desc->txd.callback = NULL;
}
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+ spin_unlock_irqrestore(&pl330->pool_lock, flags);
return desc;
}
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
- struct dma_pl330_dmac *pdmac = pch->dmac;
+ struct pl330_dmac *pl330 = pch->dmac;
u8 *peri_id = pch->chan.private;
struct dma_pl330_desc *desc;
/* Pluck one desc from the pool of DMAC */
- desc = pluck_desc(pdmac);
+ desc = pluck_desc(pl330);
/* If the DMAC pool is empty, alloc new */
if (!desc) {
- if (!add_desc(pdmac, GFP_ATOMIC, 1))
+ if (!add_desc(pl330, GFP_ATOMIC, 1))
return NULL;
/* Try again */
- desc = pluck_desc(pdmac);
+ desc = pluck_desc(pl330);
if (!desc) {
- dev_err(pch->dmac->pif.dev,
+ dev_err(pch->dmac->ddma.dev,
"%s:%d ALERT!\n", __func__, __LINE__);
return NULL;
}
desc->txd.cookie = 0;
async_tx_ack(&desc->txd);
- desc->req.peri = peri_id ? pch->chan.chan_id : 0;
- desc->rqcfg.pcfg = &pch->dmac->pif.pcfg;
+ desc->peri = peri_id ? pch->chan.chan_id : 0;
+ desc->rqcfg.pcfg = &pch->dmac->pcfg;
dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
static inline void fill_px(struct pl330_xfer *px,
dma_addr_t dst, dma_addr_t src, size_t len)
{
- px->next = NULL;
px->bytes = len;
px->dst_addr = dst;
px->src_addr = src;
struct dma_pl330_desc *desc = pl330_get_desc(pch);
if (!desc) {
- dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+ dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
__func__, __LINE__);
return NULL;
}
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
struct dma_pl330_chan *pch = desc->pchan;
- struct pl330_info *pi = &pch->dmac->pif;
+ struct pl330_dmac *pl330 = pch->dmac;
int burst_len;
- burst_len = pi->pcfg.data_bus_width / 8;
- burst_len *= pi->pcfg.data_buf_dep;
+ burst_len = pl330->pcfg.data_bus_width / 8;
+ burst_len *= pl330->pcfg.data_buf_dep;
burst_len >>= desc->rqcfg.brst_size;
/* src/dst_burst_len can't be more than 16 */
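
A worked example of the computation above, using hypothetical hardware values:

	/*
	 * e.g. data_bus_width = 64, data_buf_dep = 16, brst_size = 2:
	 *
	 *   burst_len = (64 / 8) * 16 >> 2 = 32
	 *
	 * which is then capped at 16, per the comment above.
	 */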
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct dma_pl330_desc *desc = NULL, *first = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_dmac *pdmac = pch->dmac;
+ struct pl330_dmac *pl330 = pch->dmac;
unsigned int i;
dma_addr_t dst;
dma_addr_t src;
return NULL;
if (!is_slave_direction(direction)) {
- dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
+ dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n",
__func__, __LINE__);
return NULL;
}
for (i = 0; i < len / period_len; i++) {
desc = pl330_get_desc(pch);
if (!desc) {
- dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+ dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
__func__, __LINE__);
if (!first)
return NULL;
- spin_lock_irqsave(&pdmac->pool_lock, flags);
+ spin_lock_irqsave(&pl330->pool_lock, flags);
while (!list_empty(&first->node)) {
desc = list_entry(first->node.next,
struct dma_pl330_desc, node);
- list_move_tail(&desc->node, &pdmac->desc_pool);
+ list_move_tail(&desc->node, &pl330->desc_pool);
}
- list_move_tail(&first->node, &pdmac->desc_pool);
+ list_move_tail(&first->node, &pl330->desc_pool);
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+ spin_unlock_irqrestore(&pl330->pool_lock, flags);
return NULL;
}
case DMA_MEM_TO_DEV:
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
- desc->req.rqtype = MEMTODEV;
src = dma_addr;
dst = pch->fifo_addr;
break;
case DMA_DEV_TO_MEM:
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
- desc->req.rqtype = DEVTOMEM;
src = pch->fifo_addr;
dst = dma_addr;
break;
break;
}
+ desc->rqtype = direction;
desc->rqcfg.brst_size = pch->burst_sz;
desc->rqcfg.brst_len = 1;
fill_px(&desc->px, dst, src, period_len);
{
struct dma_pl330_desc *desc;
struct dma_pl330_chan *pch = to_pchan(chan);
- struct pl330_info *pi;
+ struct pl330_dmac *pl330 = pch->dmac;
int burst;
if (unlikely(!pch || !len))
return NULL;
- pi = &pch->dmac->pif;
-
desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
if (!desc)
return NULL;
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 1;
- desc->req.rqtype = MEMTOMEM;
+ desc->rqtype = DMA_MEM_TO_MEM;
/* Select max possible burst size */
- burst = pi->pcfg.data_bus_width / 8;
+ burst = pl330->pcfg.data_bus_width / 8;
while (burst > 1) {
if (!(len % burst))
return &desc->txd;
}
-static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac,
+static void __pl330_giveback_desc(struct pl330_dmac *pl330,
struct dma_pl330_desc *first)
{
unsigned long flags;
if (!first)
return;
- spin_lock_irqsave(&pdmac->pool_lock, flags);
+ spin_lock_irqsave(&pl330->pool_lock, flags);
while (!list_empty(&first->node)) {
desc = list_entry(first->node.next,
struct dma_pl330_desc, node);
- list_move_tail(&desc->node, &pdmac->desc_pool);
+ list_move_tail(&desc->node, &pl330->desc_pool);
}
- list_move_tail(&first->node, &pdmac->desc_pool);
+ list_move_tail(&first->node, &pl330->desc_pool);
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+ spin_unlock_irqrestore(&pl330->pool_lock, flags);
}
static struct dma_async_tx_descriptor *
desc = pl330_get_desc(pch);
if (!desc) {
- struct dma_pl330_dmac *pdmac = pch->dmac;
+ struct pl330_dmac *pl330 = pch->dmac;
- dev_err(pch->dmac->pif.dev,
+ dev_err(pch->dmac->ddma.dev,
"%s:%d Unable to fetch desc\n",
__func__, __LINE__);
- __pl330_giveback_desc(pdmac, first);
+ __pl330_giveback_desc(pl330, first);
return NULL;
}
if (direction == DMA_MEM_TO_DEV) {
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
- desc->req.rqtype = MEMTODEV;
fill_px(&desc->px,
addr, sg_dma_address(sg), sg_dma_len(sg));
} else {
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
- desc->req.rqtype = DEVTOMEM;
fill_px(&desc->px,
sg_dma_address(sg), addr, sg_dma_len(sg));
}
desc->rqcfg.brst_size = pch->burst_sz;
desc->rqcfg.brst_len = 1;
+ desc->rqtype = direction;
}
/* Return the last desc in the chain */
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
struct dma_pl330_platdata *pdat;
- struct dma_pl330_dmac *pdmac;
+ struct pl330_config *pcfg;
+ struct pl330_dmac *pl330;
struct dma_pl330_chan *pch, *_p;
- struct pl330_info *pi;
struct dma_device *pd;
struct resource *res;
int i, ret, irq;
return ret;
/* Allocate a new DMAC and its Channels */
- pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
- if (!pdmac) {
+ pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
+ if (!pl330) {
dev_err(&adev->dev, "unable to allocate mem\n");
return -ENOMEM;
}
- pi = &pdmac->pif;
- pi->dev = &adev->dev;
- pi->pl330_data = NULL;
- pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
+ pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
res = &adev->res;
- pi->base = devm_ioremap_resource(&adev->dev, res);
- if (IS_ERR(pi->base))
- return PTR_ERR(pi->base);
+ pl330->base = devm_ioremap_resource(&adev->dev, res);
+ if (IS_ERR(pl330->base))
+ return PTR_ERR(pl330->base);
- amba_set_drvdata(adev, pdmac);
+ amba_set_drvdata(adev, pl330);
for (i = 0; i < AMBA_NR_IRQS; i++) {
irq = adev->irq[i];
if (irq) {
ret = devm_request_irq(&adev->dev, irq,
pl330_irq_handler, 0,
- dev_name(&adev->dev), pi);
+ dev_name(&adev->dev), pl330);
if (ret)
return ret;
} else {
}
}
- pi->pcfg.periph_id = adev->periphid;
- ret = pl330_add(pi);
+ pcfg = &pl330->pcfg;
+
+ pcfg->periph_id = adev->periphid;
+ ret = pl330_add(pl330);
if (ret)
return ret;
- INIT_LIST_HEAD(&pdmac->desc_pool);
- spin_lock_init(&pdmac->pool_lock);
+ INIT_LIST_HEAD(&pl330->desc_pool);
+ spin_lock_init(&pl330->pool_lock);
/* Create a descriptor pool of default size */
- if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
+ if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC))
dev_warn(&adev->dev, "unable to allocate desc\n");
- pd = &pdmac->ddma;
+ pd = &pl330->ddma;
INIT_LIST_HEAD(&pd->channels);
/* Initialize channel parameters */
if (pdat)
- num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
+ num_chan = max_t(int, pdat->nr_valid_peri, pcfg->num_chan);
else
- num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
+ num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);
- pdmac->num_peripherals = num_chan;
+ pl330->num_peripherals = num_chan;
- pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
- if (!pdmac->peripherals) {
+ pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
+ if (!pl330->peripherals) {
ret = -ENOMEM;
- dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
+ dev_err(&adev->dev, "unable to allocate pl330->peripherals\n");
goto probe_err2;
}
for (i = 0; i < num_chan; i++) {
- pch = &pdmac->peripherals[i];
+ pch = &pl330->peripherals[i];
if (!adev->dev.of_node)
pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
else
INIT_LIST_HEAD(&pch->work_list);
INIT_LIST_HEAD(&pch->completed_list);
spin_lock_init(&pch->lock);
- pch->pl330_chid = NULL;
+ pch->thread = NULL;
pch->chan.device = pd;
- pch->dmac = pdmac;
+ pch->dmac = pl330;
/* Add the channel to the DMAC list */
list_add_tail(&pch->chan.device_node, &pd->channels);
pd->cap_mask = pdat->cap_mask;
} else {
dma_cap_set(DMA_MEMCPY, pd->cap_mask);
- if (pi->pcfg.num_peri) {
+ if (pcfg->num_peri) {
dma_cap_set(DMA_SLAVE, pd->cap_mask);
dma_cap_set(DMA_CYCLIC, pd->cap_mask);
dma_cap_set(DMA_PRIVATE, pd->cap_mask);
if (adev->dev.of_node) {
ret = of_dma_controller_register(adev->dev.of_node,
- of_dma_pl330_xlate, pdmac);
+ of_dma_pl330_xlate, pl330);
if (ret) {
dev_err(&adev->dev,
"unable to register DMA to the generic DT DMA helpers\n");
}
}
- adev->dev.dma_parms = &pdmac->dma_parms;
+ adev->dev.dma_parms = &pl330->dma_parms;
/*
* This is the limit for transfers with a buswidth of 1, larger
"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
dev_info(&adev->dev,
"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
- pi->pcfg.data_buf_dep,
- pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
- pi->pcfg.num_peri, pi->pcfg.num_events);
+ pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
+ pcfg->num_peri, pcfg->num_events);
return 0;
probe_err3:
/* Idle the DMAC */
- list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
+ list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
chan.device_node) {
/* Remove the channel */
pl330_free_chan_resources(&pch->chan);
}
probe_err2:
- pl330_del(pi);
+ pl330_del(pl330);
return ret;
}
static int pl330_remove(struct amba_device *adev)
{
- struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
+ struct pl330_dmac *pl330 = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
- struct pl330_info *pi;
-
- if (!pdmac)
- return 0;
if (adev->dev.of_node)
of_dma_controller_free(adev->dev.of_node);
- dma_async_device_unregister(&pdmac->ddma);
+ dma_async_device_unregister(&pl330->ddma);
/* Idle the DMAC */
- list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
+ list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
chan.device_node) {
/* Remove the channel */
pl330_free_chan_resources(&pch->chan);
}
- pi = &pdmac->pif;
-
- pl330_del(pi);
+ pl330_del(pl330);
return 0;
}
#define DESC_FLAG_INT BIT(15)
#define DESC_FLAG_EOT BIT(14)
#define DESC_FLAG_EOB BIT(13)
+#define DESC_FLAG_NWD BIT(12)
struct bam_async_desc {
struct virt_dma_desc vd;
u32 num_desc;
u32 xfer_len;
+
+ /* transaction flags, EOT|EOB|NWD */
+ u16 flags;
+
struct bam_desc_hw *curr_desc;
enum dma_transfer_direction dir;
if (!async_desc)
goto err_out;
+ if (flags & DMA_PREP_FENCE)
+ async_desc->flags |= DESC_FLAG_NWD;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ async_desc->flags |= DESC_FLAG_EOT;
+ else
+ async_desc->flags |= DESC_FLAG_INT;
+
async_desc->num_desc = num_alloc;
async_desc->curr_desc = async_desc->desc;
async_desc->dir = direction;
else
async_desc->xfer_len = async_desc->num_desc;
- /* set INT on last descriptor */
- desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;
+ /* set any special flags on the last descriptor */
+ if (async_desc->num_desc == async_desc->xfer_len)
+ desc[async_desc->xfer_len - 1].flags = async_desc->flags;
+ else
+ desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;
if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
u32 partial = MAX_DESCRIPTORS - bchan->tail;
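
For reference, the new flag handling maps the generic dmaengine prep flags onto BAM descriptor flags: DMA_PREP_FENCE becomes NWD and DMA_PREP_INTERRUPT becomes EOT (with INT as the fallback). A hypothetical client sketch, assuming a slave channel and an already-mapped scatterlist:

	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT | DMA_PREP_FENCE);
	if (txd)
		dmaengine_submit(txd);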
static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
- enum dma_transfer_direction direction, unsigned long flags,
- void *context)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
- enum dma_transfer_direction dir, unsigned long flags, void *context)
+ enum dma_transfer_direction dir, unsigned long flags)
{
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
struct sa11x0_dma_desc *txd;
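
The hunks above and below all apply the same dmaengine API change: the unused void *context argument is dropped from the cyclic prep callback. The resulting prototype, sketched here for reference on the assumption that the dmaengine core has been converted first:

	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);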
# DMA engine configuration for sh
#
+#
+# DMA Engine Helpers
+#
+
config SH_DMAE_BASE
bool "Renesas SuperH DMA Engine support"
- depends on (SUPERH && SH_DMA) || ARCH_SHMOBILE || COMPILE_TEST
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on !SUPERH || SH_DMA
depends on !SH_DMA_API
default y
select DMA_ENGINE
help
Enable support for the Renesas SuperH DMA controllers.
+#
+# DMA Controllers
+#
+
config SH_DMAE
tristate "Renesas SuperH DMAC support"
depends on SH_DMAE_BASE
help
Enable support for the Renesas SuperH DMA controllers.
+if SH_DMAE
+
+config SH_DMAE_R8A73A4
+ def_bool y
+ depends on ARCH_R8A73A4
+ depends on OF
+
+endif
+
config SUDMAC
tristate "Renesas SUDMAC support"
depends on SH_DMAE_BASE
depends on SH_DMAE_BASE
help
Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
-
-config SHDMA_R8A73A4
- def_bool y
- depends on ARCH_R8A73A4 && SH_DMAE != n
+#
+# DMA Engine Helpers
+#
+
obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o
-obj-$(CONFIG_SH_DMAE) += shdma.o
+
+#
+# DMA Controllers
+#
+
shdma-y := shdmac.o
-ifeq ($(CONFIG_OF),y)
-shdma-$(CONFIG_SHDMA_R8A73A4) += shdma-r8a73a4.o
-endif
+shdma-$(CONFIG_SH_DMAE_R8A73A4) += shdma-r8a73a4.o
shdma-objs := $(shdma-y)
+obj-$(CONFIG_SH_DMAE) += shdma.o
+
obj-$(CONFIG_SUDMAC) += sudmac.o
obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
+#include <linux/of_dma.h>
#include <linux/platform_data/dma-rcar-audmapp.h>
#include <linux/platform_device.h>
#include <linux/shdma-base.h>
struct audmapp_chan {
struct shdma_chan shdma_chan;
- struct audmapp_slave_config *config;
void __iomem *base;
+ dma_addr_t slave_addr;
+ u32 chcr;
};
struct audmapp_device {
void __iomem *chan_reg;
};
+struct audmapp_desc {
+ struct shdma_desc shdma_desc;
+ dma_addr_t src;
+ dma_addr_t dst;
+};
+
+#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
+
#define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan)
+#define to_desc(sdesc) container_of(sdesc, struct audmapp_desc, shdma_desc)
#define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device, \
struct audmapp_device, shdma_dev.dma_dev)
}
static void audmapp_start_xfer(struct shdma_chan *schan,
- struct shdma_desc *sdecs)
+ struct shdma_desc *sdesc)
{
struct audmapp_chan *auchan = to_chan(schan);
struct audmapp_device *audev = to_dev(auchan);
- struct audmapp_slave_config *cfg = auchan->config;
+ struct audmapp_desc *desc = to_desc(sdesc);
struct device *dev = audev->dev;
- u32 chcr = cfg->chcr | PDMACHCR_DE;
+ u32 chcr = auchan->chcr | PDMACHCR_DE;
- dev_dbg(dev, "src/dst/chcr = %pad/%pad/%x\n",
- &cfg->src, &cfg->dst, cfg->chcr);
+ dev_dbg(dev, "src/dst/chcr = %pad/%pad/%08x\n",
+ &desc->src, &desc->dst, chcr);
- audmapp_write(auchan, cfg->src, PDMASAR);
- audmapp_write(auchan, cfg->dst, PDMADAR);
+ audmapp_write(auchan, desc->src, PDMASAR);
+ audmapp_write(auchan, desc->dst, PDMADAR);
audmapp_write(auchan, chcr, PDMACHCR);
}
-static struct audmapp_slave_config *
-audmapp_find_slave(struct audmapp_chan *auchan, int slave_id)
+static void audmapp_get_config(struct audmapp_chan *auchan, int slave_id,
+ u32 *chcr, dma_addr_t *dst)
{
struct audmapp_device *audev = to_dev(auchan);
struct audmapp_pdata *pdata = audev->pdata;
struct audmapp_slave_config *cfg;
int i;
+ *chcr = 0;
+ *dst = 0;
+
+ if (!pdata) { /* DT */
+ *chcr = ((u32)slave_id) << 16;
+ auchan->shdma_chan.slave_id = (slave_id) >> 8;
+ return;
+ }
+
+ /* non-DT */
+
if (slave_id >= AUDMAPP_SLAVE_NUMBER)
- return NULL;
+ return;
for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
- if (cfg->slave_id == slave_id)
- return cfg;
-
- return NULL;
+ if (cfg->slave_id == slave_id) {
+ *chcr = cfg->chcr;
+ *dst = cfg->dst;
+ break;
+ }
}
static int audmapp_set_slave(struct shdma_chan *schan, int slave_id,
dma_addr_t slave_addr, bool try)
{
struct audmapp_chan *auchan = to_chan(schan);
- struct audmapp_slave_config *cfg =
- audmapp_find_slave(auchan, slave_id);
+ u32 chcr;
+ dma_addr_t dst;
+
+ audmapp_get_config(auchan, slave_id, &chcr, &dst);
- if (!cfg)
- return -ENODEV;
if (try)
return 0;
- auchan->config = cfg;
+ auchan->chcr = chcr;
+ auchan->slave_addr = slave_addr ? : dst;
return 0;
}
static int audmapp_desc_setup(struct shdma_chan *schan,
- struct shdma_desc *sdecs,
+ struct shdma_desc *sdesc,
dma_addr_t src, dma_addr_t dst, size_t *len)
{
- struct audmapp_chan *auchan = to_chan(schan);
- struct audmapp_slave_config *cfg = auchan->config;
-
- if (!cfg)
- return -ENODEV;
+ struct audmapp_desc *desc = to_desc(sdesc);
if (*len > (size_t)AUDMAPP_LEN_MAX)
*len = (size_t)AUDMAPP_LEN_MAX;
+ desc->src = src;
+ desc->dst = dst;
+
return 0;
}
static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan)
{
- return 0; /* always fixed address */
+ struct audmapp_chan *auchan = to_chan(schan);
+
+ return auchan->slave_addr;
}
static bool audmapp_channel_busy(struct shdma_chan *schan)
static struct shdma_desc *audmapp_embedded_desc(void *buf, int i)
{
- return &((struct shdma_desc *)buf)[i];
+ return &((struct audmapp_desc *)buf)[i].shdma_desc;
}
static const struct shdma_ops audmapp_shdma_ops = {
dma_dev->chancnt = 0;
}
+static struct dma_chan *audmapp_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ u32 chcr = dma_spec->args[0];
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = dma_request_channel(mask, shdma_chan_filter, NULL);
+ if (chan)
+ to_shdma_chan(chan)->hw_req = chcr;
+
+ return chan;
+}
+
static int audmapp_probe(struct platform_device *pdev)
{
struct audmapp_pdata *pdata = pdev->dev.platform_data;
+ struct device_node *np = pdev->dev.of_node;
struct audmapp_device *audev;
struct shdma_dev *sdev;
struct dma_device *dma_dev;
struct resource *res;
int err, i;
- if (!pdata)
+ if (np)
+ of_dma_controller_register(np, audmapp_of_xlate, pdev);
+ else if (!pdata)
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sdev = &audev->shdma_dev;
sdev->ops = &audmapp_shdma_ops;
- sdev->desc_size = sizeof(struct shdma_desc);
+ sdev->desc_size = sizeof(struct audmapp_desc);
dma_dev = &sdev->dma_dev;
dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
return 0;
}
+static const struct of_device_id audmapp_of_match[] = {
+ { .compatible = "renesas,rcar-audmapp", },
+ {},
+};
+
static struct platform_driver audmapp_driver = {
.probe = audmapp_probe,
.remove = audmapp_remove,
.driver = {
.owner = THIS_MODULE,
.name = "rcar-audmapp-engine",
+ .of_match_table = audmapp_of_match,
},
};
module_platform_driver(audmapp_driver);
((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\
(((i) & TS_HI_BIT) << TS_HI_SHIFT))
-#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz)))
-#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz)))
+#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL((xmit_sz)))
+#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL((xmit_sz)))
#endif
return 0;
}
-/*
- * This is the standard shdma filter function to be used as a replacement to the
- * "old" method, using the .private pointer. If for some reason you allocate a
- * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter
- * parameter. If this filter is used, the slave driver, after calling
- * dma_request_channel(), will also have to call dmaengine_slave_config() with
- * .slave_id, .direction, and either .src_addr or .dst_addr set.
- * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
- * capability! If this becomes a requirement, hardware glue drivers, using this
- * services would have to provide their own filters, which first would check
- * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
- * this, and only then, in case of a match, call this common filter.
- * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
- * In that case the MID-RID value is used for slave channel filtering and is
- * passed to this function in the "arg" parameter.
- */
-bool shdma_chan_filter(struct dma_chan *chan, void *arg)
-{
- struct shdma_chan *schan = to_shdma_chan(chan);
- struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
- const struct shdma_ops *ops = sdev->ops;
- int match = (long)arg;
- int ret;
-
- if (match < 0)
- /* No slave requested - arbitrary channel */
- return true;
-
- if (!schan->dev->of_node && match >= slave_num)
- return false;
-
- ret = ops->set_slave(schan, match, 0, true);
- if (ret < 0)
- return false;
-
- return true;
-}
-EXPORT_SYMBOL(shdma_chan_filter);
-
static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
struct shdma_chan *schan = to_shdma_chan(chan);
return ret;
}
+/*
+ * This is the standard shdma filter function to be used as a replacement for the
+ * "old" method, using the .private pointer. If for some reason you allocate a
+ * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter
+ * parameter. If this filter is used, the slave driver, after calling
+ * dma_request_channel(), will also have to call dmaengine_slave_config() with
+ * .slave_id, .direction, and either .src_addr or .dst_addr set.
+ * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
+ * capability! If this becomes a requirement, hardware glue drivers, using these
+ * services would have to provide their own filters, which first would check
+ * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
+ * this, and only then, in case of a match, call this common filter.
+ * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
+ * In that case the MID-RID value is used for slave channel filtering and is
+ * passed to this function in the "arg" parameter.
+ */
+bool shdma_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct shdma_chan *schan;
+ struct shdma_dev *sdev;
+ int match = (long)arg;
+ int ret;
+
+ /* Only support channels handled by this driver. */
+ if (chan->device->device_alloc_chan_resources !=
+ shdma_alloc_chan_resources)
+ return false;
+
+ if (match < 0)
+ /* No slave requested - arbitrary channel */
+ return true;
+
+ schan = to_shdma_chan(chan);
+ if (!schan->dev->of_node && match >= slave_num)
+ return false;
+
+ sdev = to_shdma_dev(schan->dma_chan.device);
+ ret = sdev->ops->set_slave(schan, match, 0, true);
+ if (ret < 0)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(shdma_chan_filter);
+
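
As the comment above prescribes, a slave driver would pair dma_request_channel() with dmaengine_slave_config(). A minimal sketch, where mid_rid and the FIFO address are hypothetical values for some slave device:

	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = 0xe6000000,		/* hypothetical FIFO address */
		.slave_id = mid_rid,		/* hypothetical MID/RID value */
	};

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, shdma_chan_filter,
				   (void *)(uintptr_t)mid_rid);
	if (chan)
		dmaengine_slave_config(chan, &cfg);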
static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
struct shdma_desc *desc, *_desc;
static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct shdma_chan *schan = to_shdma_chan(chan);
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ struct dma_async_tx_descriptor *desc;
const struct shdma_ops *ops = sdev->ops;
unsigned int sg_len = buf_len / period_len;
int slave_id = schan->slave_id;
dma_addr_t slave_addr;
- struct scatterlist sgl[SHDMA_MAX_SG_LEN];
+ struct scatterlist *sgl;
int i;
if (!chan)
slave_addr = ops->slave_addr(schan);
+ /*
+ * Allocate the sg list dynamically as it would consume too much stack
+ * space.
+ */
+ sgl = kcalloc(sg_len, sizeof(*sgl), GFP_KERNEL);
+ if (!sgl)
+ return NULL;
+
sg_init_table(sgl, sg_len);
+
for (i = 0; i < sg_len; i++) {
dma_addr_t src = buf_addr + (period_len * i);
sg_dma_len(&sgl[i]) = period_len;
}
- return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
+ desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
direction, flags, true);
+
+ kfree(sgl);
+ return desc;
}
static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
#define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\
struct sh_dmae_device, shdma_dev.dma_dev)
-#ifdef CONFIG_SHDMA_R8A73A4
+#ifdef CONFIG_SH_DMAE_R8A73A4
extern const struct sh_dmae_pdata r8a73a4_dma_pdata;
#define r8a73a4_shdma_devid (&r8a73a4_dma_pdata)
#else
#include "../dmaengine.h"
#include "shdma.h"
-/* DMA register */
-#define SAR 0x00
-#define DAR 0x04
-#define TCR 0x08
-#define CHCR 0x0C
-#define DMAOR 0x40
+/* DMA registers */
+#define SAR 0x00 /* Source Address Register */
+#define DAR 0x04 /* Destination Address Register */
+#define TCR 0x08 /* Transfer Count Register */
+#define CHCR 0x0C /* Channel Control Register */
+#define DMAOR 0x40 /* DMA Operation Register */
#define TEND 0x18 /* USB-DMAC */
{
/*
* Default configuration for dual address memory-memory transfer.
- * 0x400 represents auto-request.
*/
- u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
+ u32 chcr = DM_INC | SM_INC | RS_AUTO | log2size_to_chcr(sh_chan,
LOG2_DEFAULT_XFER_SIZE);
sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
chcr_write(sh_chan, chcr);
static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction, unsigned long flags, void *context)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
struct sirfsoc_dma_desc *sdesc = NULL;
static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction, unsigned long flags,
- void *context)
+ enum dma_transfer_direction direction, unsigned long flags)
{
unsigned int periods = buf_len / period_len;
struct dma_async_tx_descriptor *txd;
--- /dev/null
+/*
+ * Copyright (C) 2013-2014 Allwinner Tech Co., Ltd
+ * Author: Sugar <shuge@allwinnertech.com>
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "virt-dma.h"
+
+/*
+ * There are 16 physical channels that can work in parallel.
+ *
+ * However we have 30 different endpoints for our requests.
+ *
+ * Since the channels can only handle unidirectional transfers,
+ * we need to allocate more virtual channels so that everyone can
+ * grab one channel.
+ *
+ * Some devices can't work in both directions (mostly because it
+ * wouldn't make sense), so we end up with a bit fewer virtual channels
+ * than 2 per endpoint: 53 rather than the full 2 * 30 = 60.
+ */
+
+#define NR_MAX_CHANNELS 16
+#define NR_MAX_REQUESTS 30
+#define NR_MAX_VCHANS 53
+
+/*
+ * Common registers
+ */
+#define DMA_IRQ_EN(x) ((x) * 0x04)
+#define DMA_IRQ_HALF BIT(0)
+#define DMA_IRQ_PKG BIT(1)
+#define DMA_IRQ_QUEUE BIT(2)
+
+#define DMA_IRQ_CHAN_NR 8
+#define DMA_IRQ_CHAN_WIDTH 4
+
+
+#define DMA_IRQ_STAT(x) ((x) * 0x04 + 0x10)
+
+#define DMA_STAT 0x30
+
+/*
+ * Channel-specific registers
+ */
+#define DMA_CHAN_ENABLE 0x00
+#define DMA_CHAN_ENABLE_START BIT(0)
+#define DMA_CHAN_ENABLE_STOP 0
+
+#define DMA_CHAN_PAUSE 0x04
+#define DMA_CHAN_PAUSE_PAUSE BIT(1)
+#define DMA_CHAN_PAUSE_RESUME 0
+
+#define DMA_CHAN_LLI_ADDR 0x08
+
+#define DMA_CHAN_CUR_CFG 0x0c
+#define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & 0x1f)
+#define DMA_CHAN_CFG_SRC_IO_MODE BIT(5)
+#define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5)
+#define DMA_CHAN_CFG_SRC_BURST(x) (((x) & 0x3) << 7)
+#define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9)
+
+#define DMA_CHAN_CFG_DST_DRQ(x) (DMA_CHAN_CFG_SRC_DRQ(x) << 16)
+#define DMA_CHAN_CFG_DST_IO_MODE (DMA_CHAN_CFG_SRC_IO_MODE << 16)
+#define DMA_CHAN_CFG_DST_LINEAR_MODE (DMA_CHAN_CFG_SRC_LINEAR_MODE << 16)
+#define DMA_CHAN_CFG_DST_BURST(x) (DMA_CHAN_CFG_SRC_BURST(x) << 16)
+#define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16)
+
+#define DMA_CHAN_CUR_SRC 0x10
+
+#define DMA_CHAN_CUR_DST 0x14
+
+#define DMA_CHAN_CUR_CNT 0x18
+
+#define DMA_CHAN_CUR_PARA 0x1c
+
+
+/*
+ * Various hardware-related defines
+ */
+#define LLI_LAST_ITEM 0xfffff800
+#define NORMAL_WAIT 8
+#define DRQ_SDRAM 1
+
+/*
+ * Hardware representation of the LLI
+ *
+ * The hardware will be fed the physical address of this structure,
+ * and read its content in order to start the transfer.
+ */
+struct sun6i_dma_lli {
+ u32 cfg;
+ u32 src;
+ u32 dst;
+ u32 len;
+ u32 para;
+ u32 p_lli_next;
+
+ /*
+ * This field is not used by the DMA controller, but will be
+ * used by the CPU to go through the list (mostly for dumping
+ * or freeing it).
+ */
+ struct sun6i_dma_lli *v_lli_next;
+};
+
+
+struct sun6i_desc {
+ struct virt_dma_desc vd;
+ dma_addr_t p_lli;
+ struct sun6i_dma_lli *v_lli;
+};
+
+struct sun6i_pchan {
+ u32 idx;
+ void __iomem *base;
+ struct sun6i_vchan *vchan;
+ struct sun6i_desc *desc;
+ struct sun6i_desc *done;
+};
+
+struct sun6i_vchan {
+ struct virt_dma_chan vc;
+ struct list_head node;
+ struct dma_slave_config cfg;
+ struct sun6i_pchan *phy;
+ u8 port;
+};
+
+struct sun6i_dma_dev {
+ struct dma_device slave;
+ void __iomem *base;
+ struct clk *clk;
+ int irq;
+ spinlock_t lock;
+ struct reset_control *rstc;
+ struct tasklet_struct task;
+ atomic_t tasklet_shutdown;
+ struct list_head pending;
+ struct dma_pool *pool;
+ struct sun6i_pchan *pchans;
+ struct sun6i_vchan *vchans;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct sun6i_dma_dev *to_sun6i_dma_dev(struct dma_device *d)
+{
+ return container_of(d, struct sun6i_dma_dev, slave);
+}
+
+static inline struct sun6i_vchan *to_sun6i_vchan(struct dma_chan *chan)
+{
+ return container_of(chan, struct sun6i_vchan, vc.chan);
+}
+
+static inline struct sun6i_desc *
+to_sun6i_desc(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct sun6i_desc, vd.tx);
+}
+
+static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev)
+{
+ dev_dbg(sdev->slave.dev, "Common register:\n"
+ "\tmask0(%04x): 0x%08x\n"
+ "\tmask1(%04x): 0x%08x\n"
+ "\tpend0(%04x): 0x%08x\n"
+ "\tpend1(%04x): 0x%08x\n"
+ "\tstats(%04x): 0x%08x\n",
+ DMA_IRQ_EN(0), readl(sdev->base + DMA_IRQ_EN(0)),
+ DMA_IRQ_EN(1), readl(sdev->base + DMA_IRQ_EN(1)),
+ DMA_IRQ_STAT(0), readl(sdev->base + DMA_IRQ_STAT(0)),
+ DMA_IRQ_STAT(1), readl(sdev->base + DMA_IRQ_STAT(1)),
+ DMA_STAT, readl(sdev->base + DMA_STAT));
+}
+
+static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
+ struct sun6i_pchan *pchan)
+{
+ phys_addr_t reg = virt_to_phys(pchan->base);
+
+ dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n"
+ "\t___en(%04x): \t0x%08x\n"
+ "\tpause(%04x): \t0x%08x\n"
+ "\tstart(%04x): \t0x%08x\n"
+ "\t__cfg(%04x): \t0x%08x\n"
+ "\t__src(%04x): \t0x%08x\n"
+ "\t__dst(%04x): \t0x%08x\n"
+ "\tcount(%04x): \t0x%08x\n"
+ "\t_para(%04x): \t0x%08x\n\n",
+ pchan->idx, &reg,
+ DMA_CHAN_ENABLE,
+ readl(pchan->base + DMA_CHAN_ENABLE),
+ DMA_CHAN_PAUSE,
+ readl(pchan->base + DMA_CHAN_PAUSE),
+ DMA_CHAN_LLI_ADDR,
+ readl(pchan->base + DMA_CHAN_LLI_ADDR),
+ DMA_CHAN_CUR_CFG,
+ readl(pchan->base + DMA_CHAN_CUR_CFG),
+ DMA_CHAN_CUR_SRC,
+ readl(pchan->base + DMA_CHAN_CUR_SRC),
+ DMA_CHAN_CUR_DST,
+ readl(pchan->base + DMA_CHAN_CUR_DST),
+ DMA_CHAN_CUR_CNT,
+ readl(pchan->base + DMA_CHAN_CUR_CNT),
+ DMA_CHAN_CUR_PARA,
+ readl(pchan->base + DMA_CHAN_CUR_PARA));
+}
+
+static inline int convert_burst(u32 maxburst, u8 *burst)
+{
+ switch (maxburst) {
+ case 1:
+ *burst = 0;
+ break;
+ case 8:
+ *burst = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int convert_buswidth(enum dma_slave_buswidth addr_width, u8 *width)
+{
+ if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) ||
+ (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
+ return -EINVAL;
+
+ *width = addr_width >> 1;
+ return 0;
+}
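
The shift works because the dma_slave_buswidth enumerators are the byte counts themselves:

	/*
	 * DMA_SLAVE_BUSWIDTH_1_BYTE  (1) >> 1 == 0
	 * DMA_SLAVE_BUSWIDTH_2_BYTES (2) >> 1 == 1
	 * DMA_SLAVE_BUSWIDTH_4_BYTES (4) >> 1 == 2
	 */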
+
+static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
+ struct sun6i_dma_lli *next,
+ dma_addr_t next_phy,
+ struct sun6i_desc *txd)
+{
+ if ((!prev && !txd) || !next)
+ return NULL;
+
+ if (!prev) {
+ txd->p_lli = next_phy;
+ txd->v_lli = next;
+ } else {
+ prev->p_lli_next = next_phy;
+ prev->v_lli_next = next;
+ }
+
+ next->p_lli_next = LLI_LAST_ITEM;
+ next->v_lli_next = NULL;
+
+ return next;
+}
+
+static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli,
+ dma_addr_t src,
+ dma_addr_t dst, u32 len,
+ struct dma_slave_config *config)
+{
+ u8 src_width, dst_width, src_burst, dst_burst;
+ int ret;
+
+ if (!config)
+ return -EINVAL;
+
+ ret = convert_burst(config->src_maxburst, &src_burst);
+ if (ret)
+ return ret;
+
+ ret = convert_burst(config->dst_maxburst, &dst_burst);
+ if (ret)
+ return ret;
+
+ ret = convert_buswidth(config->src_addr_width, &src_width);
+ if (ret)
+ return ret;
+
+ ret = convert_buswidth(config->dst_addr_width, &dst_width);
+ if (ret)
+ return ret;
+
+ lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) |
+ DMA_CHAN_CFG_SRC_WIDTH(src_width) |
+ DMA_CHAN_CFG_DST_BURST(dst_burst) |
+ DMA_CHAN_CFG_DST_WIDTH(dst_width);
+
+ lli->src = src;
+ lli->dst = dst;
+ lli->len = len;
+ lli->para = NORMAL_WAIT;
+
+ return 0;
+}
+
+static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,
+ struct sun6i_dma_lli *lli)
+{
+ phys_addr_t p_lli = virt_to_phys(lli);
+
+ dev_dbg(chan2dev(&vchan->vc.chan),
+ "\n\tdesc: p - %pa v - 0x%p\n"
+ "\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n"
+ "\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n",
+ &p_lli, lli,
+ lli->cfg, lli->src, lli->dst,
+ lli->len, lli->para, lli->p_lli_next);
+}
+
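+/*
+ * Descriptor free callback: walk the LLI chain, returning each item
+ * to the DMA pool, then free the descriptor itself.
+ */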
+static void sun6i_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct sun6i_desc *txd = to_sun6i_desc(&vd->tx);
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vd->tx.chan->device);
+ struct sun6i_dma_lli *v_lli, *v_next;
+ dma_addr_t p_lli, p_next;
+
+ if (unlikely(!txd))
+ return;
+
+ p_lli = txd->p_lli;
+ v_lli = txd->v_lli;
+
+ while (v_lli) {
+ v_next = v_lli->v_lli_next;
+ p_next = v_lli->p_lli_next;
+
+ dma_pool_free(sdev->pool, v_lli, p_lli);
+
+ v_lli = v_next;
+ p_lli = p_next;
+ }
+
+ kfree(txd);
+}
+
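+/*
+ * Stop the physical channel (if any), detach it from the virtual
+ * channel and free every queued descriptor.
+ */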
+static int sun6i_dma_terminate_all(struct sun6i_vchan *vchan)
+{
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
+ struct sun6i_pchan *pchan = vchan->phy;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock(&sdev->lock);
+ list_del_init(&vchan->node);
+ spin_unlock(&sdev->lock);
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ vchan_get_all_descriptors(&vchan->vc, &head);
+
+ if (pchan) {
+ writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
+ writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);
+
+ vchan->phy = NULL;
+ pchan->vchan = NULL;
+ pchan->desc = NULL;
+ pchan->done = NULL;
+ }
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&vchan->vc, &head);
+
+ return 0;
+}
+
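+/*
+ * Pop the next issued descriptor, enable the queue-end interrupt for
+ * this physical channel, point the hardware at the LLI chain and
+ * start the transfer.
+ */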
+static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
+{
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
+ struct virt_dma_desc *desc = vchan_next_desc(&vchan->vc);
+ struct sun6i_pchan *pchan = vchan->phy;
+ u32 irq_val, irq_reg, irq_offset;
+
+ if (!pchan)
+ return -EAGAIN;
+
+ if (!desc) {
+ pchan->desc = NULL;
+ pchan->done = NULL;
+ return -EAGAIN;
+ }
+
+ list_del(&desc->node);
+
+ pchan->desc = to_sun6i_desc(&desc->tx);
+ pchan->done = NULL;
+
+ sun6i_dma_dump_lli(vchan, pchan->desc->v_lli);
+
+ irq_reg = pchan->idx / DMA_IRQ_CHAN_NR;
+ irq_offset = pchan->idx % DMA_IRQ_CHAN_NR;
+
+ irq_val = readl(sdev->base + DMA_IRQ_EN(irq_reg));
+ irq_val |= DMA_IRQ_QUEUE << (irq_offset * DMA_IRQ_CHAN_WIDTH);
+ writel(irq_val, sdev->base + DMA_IRQ_EN(irq_reg));
+
+ writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR);
+ writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE);
+
+ sun6i_dma_dump_com_regs(sdev);
+ sun6i_dma_dump_chan_regs(sdev, pchan);
+
+ return 0;
+}
+
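+/*
+ * Two passes: first, restart or release physical channels whose
+ * descriptor has completed; then hand free physical channels to
+ * pending virtual channels and kick off their transfers.
+ */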
+static void sun6i_dma_tasklet(unsigned long data)
+{
+ struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
+ struct sun6i_vchan *vchan;
+ struct sun6i_pchan *pchan;
+ unsigned int pchan_alloc = 0;
+ unsigned int pchan_idx;
+
+ list_for_each_entry(vchan, &sdev->slave.channels, vc.chan.device_node) {
+ spin_lock_irq(&vchan->vc.lock);
+
+ pchan = vchan->phy;
+
+ if (pchan && pchan->done) {
+ if (sun6i_dma_start_desc(vchan)) {
+ /*
+ * No current txd associated with this channel
+ */
+ dev_dbg(sdev->slave.dev, "pchan %u: free\n",
+ pchan->idx);
+
+ /* Mark this channel free */
+ vchan->phy = NULL;
+ pchan->vchan = NULL;
+ }
+ }
+ spin_unlock_irq(&vchan->vc.lock);
+ }
+
+ spin_lock_irq(&sdev->lock);
+ for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
+ pchan = &sdev->pchans[pchan_idx];
+
+ if (pchan->vchan || list_empty(&sdev->pending))
+ continue;
+
+ vchan = list_first_entry(&sdev->pending,
+ struct sun6i_vchan, node);
+
+ /* Remove from pending channels */
+ list_del_init(&vchan->node);
+ pchan_alloc |= BIT(pchan_idx);
+
+ /* Mark this channel allocated */
+ pchan->vchan = vchan;
+ vchan->phy = pchan;
+ dev_dbg(sdev->slave.dev, "pchan %u: alloc vchan %p\n",
+ pchan->idx, &vchan->vc);
+ }
+ spin_unlock_irq(&sdev->lock);
+
+ for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
+ if (!(pchan_alloc & BIT(pchan_idx)))
+ continue;
+
+ pchan = sdev->pchans + pchan_idx;
+ vchan = pchan->vchan;
+ if (vchan) {
+ spin_lock_irq(&vchan->vc.lock);
+ sun6i_dma_start_desc(vchan);
+ spin_unlock_irq(&vchan->vc.lock);
+ }
+ }
+}
+
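+/*
+ * Interrupt status is spread over two registers, DMA_IRQ_CHAN_WIDTH
+ * bits per channel. Queue-end completions are acknowledged here and
+ * the channel bookkeeping is deferred to the tasklet.
+ */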
+static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
+{
+ struct sun6i_dma_dev *sdev = dev_id;
+ struct sun6i_vchan *vchan;
+ struct sun6i_pchan *pchan;
+ int i, j, ret = IRQ_NONE;
+ u32 status;
+
+ for (i = 0; i < 2; i++) {
+ status = readl(sdev->base + DMA_IRQ_STAT(i));
+ if (!status)
+ continue;
+
+ dev_dbg(sdev->slave.dev, "DMA irq status %s: 0x%x\n",
+ i ? "high" : "low", status);
+
+ writel(status, sdev->base + DMA_IRQ_STAT(i));
+
+ for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) {
+ if (status & DMA_IRQ_QUEUE) {
+ pchan = sdev->pchans + i * DMA_IRQ_CHAN_NR + j;
+ vchan = pchan->vchan;
+
+ if (vchan) {
+ spin_lock(&vchan->vc.lock);
+ vchan_cookie_complete(&pchan->desc->vd);
+ pchan->done = pchan->desc;
+ spin_unlock(&vchan->vc.lock);
+ }
+ }
+
+ status = status >> DMA_IRQ_CHAN_WIDTH;
+ }
+
+ if (!atomic_read(&sdev->tasklet_shutdown))
+ tasklet_schedule(&sdev->task);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
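+/*
+ * memcpy uses a single LLI with both endpoints in linear SDRAM mode;
+ * burst and bus width are taken from the channel's cached slave
+ * config, so a DMA_SLAVE_CONFIG must have been issued beforehand.
+ */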
+static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+ struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+ struct dma_slave_config *sconfig = &vchan->cfg;
+ struct sun6i_dma_lli *v_lli;
+ struct sun6i_desc *txd;
+ dma_addr_t p_lli;
+ int ret;
+
+ dev_dbg(chan2dev(chan),
+ "%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n",
+ __func__, vchan->vc.chan.chan_id, &dest, &src, len, flags);
+
+ if (!len)
+ return NULL;
+
+ txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+ if (!txd)
+ return NULL;
+
+ v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
+ if (!v_lli) {
+ dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
+ goto err_txd_free;
+ }
+
+ ret = sun6i_dma_cfg_lli(v_lli, src, dest, len, sconfig);
+ if (ret)
+ goto err_dma_free;
+
+ v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
+ DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
+ DMA_CHAN_CFG_DST_LINEAR_MODE |
+ DMA_CHAN_CFG_SRC_LINEAR_MODE;
+
+ sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);
+
+ sun6i_dma_dump_lli(vchan, v_lli);
+
+ return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_dma_free:
+ dma_pool_free(sdev->pool, v_lli, p_lli);
+err_txd_free:
+ kfree(txd);
+ return NULL;
+}
+
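+/*
+ * Build one LLI per scatterlist entry. The device side runs in IO
+ * mode on the DRQ port selected at translation time; the memory side
+ * stays linear on the SDRAM DRQ.
+ */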
+static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+ struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+ struct dma_slave_config *sconfig = &vchan->cfg;
+ struct sun6i_dma_lli *v_lli, *prev = NULL;
+ struct sun6i_desc *txd;
+ struct scatterlist *sg;
+ dma_addr_t p_lli;
+ int i, ret;
+
+ if (!sgl)
+ return NULL;
+
+ if (!is_slave_direction(dir)) {
+ dev_err(chan2dev(chan), "Invalid DMA direction\n");
+ return NULL;
+ }
+
+ txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+ if (!txd)
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
+ if (!v_lli)
+ goto err_lli_free;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ ret = sun6i_dma_cfg_lli(v_lli, sg_dma_address(sg),
+ sconfig->dst_addr, sg_dma_len(sg),
+ sconfig);
+ if (ret)
+ goto err_cur_lli_free;
+
+ v_lli->cfg |= DMA_CHAN_CFG_DST_IO_MODE |
+ DMA_CHAN_CFG_SRC_LINEAR_MODE |
+ DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
+ DMA_CHAN_CFG_DST_DRQ(vchan->port);
+
+ dev_dbg(chan2dev(chan),
+ "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
+ __func__, vchan->vc.chan.chan_id,
+ &sconfig->dst_addr, &sg_dma_address(sg),
+ sg_dma_len(sg), flags);
+
+ } else {
+ ret = sun6i_dma_cfg_lli(v_lli, sconfig->src_addr,
+ sg_dma_address(sg), sg_dma_len(sg),
+ sconfig);
+ if (ret)
+ goto err_cur_lli_free;
+
+ v_lli->cfg |= DMA_CHAN_CFG_DST_LINEAR_MODE |
+ DMA_CHAN_CFG_SRC_IO_MODE |
+ DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
+ DMA_CHAN_CFG_SRC_DRQ(vchan->port);
+
+ dev_dbg(chan2dev(chan),
+ "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
+ __func__, vchan->vc.chan.chan_id,
+ &sg_dma_address(sg), &sconfig->src_addr,
+ sg_dma_len(sg), flags);
+ }
+
+ prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
+ }
+
+ dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli);
+ for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
+ sun6i_dma_dump_lli(vchan, prev);
+
+ return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_cur_lli_free:
+ dma_pool_free(sdev->pool, v_lli, p_lli);
+err_lli_free:
+ for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
+ dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
+ kfree(txd);
+ return NULL;
+}
+
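+/*
+ * dmaengine control hook: pause/resume map to the channel PAUSE
+ * register when a physical channel is attached, otherwise to the
+ * driver's pending list; slave config is simply cached per vchan.
+ */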
+static int sun6i_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+ struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+ struct sun6i_pchan *pchan = vchan->phy;
+ unsigned long flags;
+ int ret = 0;
+
+ switch (cmd) {
+ case DMA_RESUME:
+ dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ if (pchan) {
+ writel(DMA_CHAN_PAUSE_RESUME,
+ pchan->base + DMA_CHAN_PAUSE);
+ } else if (!list_empty(&vchan->vc.desc_issued)) {
+ spin_lock(&sdev->lock);
+ list_add_tail(&vchan->node, &sdev->pending);
+ spin_unlock(&sdev->lock);
+ }
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+ break;
+
+ case DMA_PAUSE:
+ dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);
+
+ if (pchan) {
+ writel(DMA_CHAN_PAUSE_PAUSE,
+ pchan->base + DMA_CHAN_PAUSE);
+ } else {
+ spin_lock(&sdev->lock);
+ list_del_init(&vchan->node);
+ spin_unlock(&sdev->lock);
+ }
+ break;
+
+ case DMA_TERMINATE_ALL:
+ ret = sun6i_dma_terminate_all(vchan);
+ break;
+ case DMA_SLAVE_CONFIG:
+ memcpy(&vchan->cfg, (void *)arg, sizeof(struct dma_slave_config));
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+ return ret;
+}
+
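+/*
+ * Residue is the full remaining length while the descriptor still
+ * sits on the queue; once it is in flight on a physical channel the
+ * current byte counter is returned instead.
+ */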
+static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+ struct sun6i_pchan *pchan = vchan->phy;
+ struct sun6i_dma_lli *lli;
+ struct virt_dma_desc *vd;
+ struct sun6i_desc *txd;
+ enum dma_status ret;
+ unsigned long flags;
+ size_t bytes = 0;
+
+ ret = dma_cookie_status(chan, cookie, state);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ vd = vchan_find_desc(&vchan->vc, cookie);
+ if (vd) {
+ txd = to_sun6i_desc(&vd->tx);
+ for (lli = txd->v_lli; lli != NULL; lli = lli->v_lli_next)
+ bytes += lli->len;
+ } else if (!pchan || !pchan->desc) {
+ bytes = 0;
+ } else {
+ bytes = readl(pchan->base + DMA_CHAN_CUR_CNT);
+ }
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ dma_set_residue(state, bytes);
+
+ return ret;
+}
+
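+/*
+ * Move the channel's submitted descriptors to the issued list and,
+ * if no physical channel is attached yet, queue the virtual channel
+ * and let the tasklet find it a free one.
+ */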
+static void sun6i_dma_issue_pending(struct dma_chan *chan)
+{
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+ struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ if (vchan_issue_pending(&vchan->vc)) {
+ spin_lock(&sdev->lock);
+
+ if (!vchan->phy && list_empty(&vchan->node)) {
+ list_add_tail(&vchan->node, &sdev->pending);
+ tasklet_schedule(&sdev->task);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n",
+ &vchan->vc);
+ }
+
+ spin_unlock(&sdev->lock);
+ } else {
+ dev_dbg(chan2dev(chan), "vchan %p: nothing to issue\n",
+ &vchan->vc);
+ }
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+}
+
+static int sun6i_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ return 0;
+}
+
+static void sun6i_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+ struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdev->lock, flags);
+ list_del_init(&vchan->node);
+ spin_unlock_irqrestore(&sdev->lock, flags);
+
+ vchan_free_chan_resources(&vchan->vc);
+}
+
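+/*
+ * Translate a single-cell DT DMA specifier: the cell is the DRQ port
+ * number, stored on the virtual channel for use when preparing slave
+ * transfers.
+ */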
+static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct sun6i_dma_dev *sdev = ofdma->of_dma_data;
+ struct sun6i_vchan *vchan;
+ struct dma_chan *chan;
+ u8 port = dma_spec->args[0];
+
+ if (port > NR_MAX_REQUESTS)
+ return NULL;
+
+ chan = dma_get_any_slave_channel(&sdev->slave);
+ if (!chan)
+ return NULL;
+
+ vchan = to_sun6i_vchan(chan);
+ vchan->port = port;
+
+ return chan;
+}
+
+static inline void sun6i_kill_tasklet(struct sun6i_dma_dev *sdev)
+{
+ /* Disable all interrupts from DMA */
+ writel(0, sdev->base + DMA_IRQ_EN(0));
+ writel(0, sdev->base + DMA_IRQ_EN(1));
+
+ /* Prevent spurious interrupts from scheduling the tasklet */
+ atomic_inc(&sdev->tasklet_shutdown);
+
+ /* Make sure we won't have any further interrupts */
+ devm_free_irq(sdev->slave.dev, sdev->irq, sdev);
+
+ /* Actually prevent the tasklet from being scheduled */
+ tasklet_kill(&sdev->task);
+}
+
+static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
+{
+ int i;
+
+ for (i = 0; i < NR_MAX_VCHANS; i++) {
+ struct sun6i_vchan *vchan = &sdev->vchans[i];
+
+ list_del(&vchan->vc.chan.device_node);
+ tasklet_kill(&vchan->vc.task);
+ }
+}
+
+static int sun6i_dma_probe(struct platform_device *pdev)
+{
+ struct sun6i_dma_dev *sdc;
+ struct resource *res;
+ struct clk *mux, *pll6;
+ int ret, i;
+
+ sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL);
+ if (!sdc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sdc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sdc->base))
+ return PTR_ERR(sdc->base);
+
+ sdc->irq = platform_get_irq(pdev, 0);
+ if (sdc->irq < 0) {
+ dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ return sdc->irq;
+ }
+
+ sdc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sdc->clk)) {
+ dev_err(&pdev->dev, "No clock specified\n");
+ return PTR_ERR(sdc->clk);
+ }
+
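+ /*
+ * The DMA controller is only reliable with AHB1 clocked from PLL6,
+ * so reparent the mux before touching the device. The global
+ * clk_get() lookups assume clocks named "ahb1_mux" and "pll6" are
+ * registered.
+ */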
+ mux = clk_get(NULL, "ahb1_mux");
+ if (IS_ERR(mux)) {
+ dev_err(&pdev->dev, "Couldn't get AHB1 Mux\n");
+ return PTR_ERR(mux);
+ }
+
+ pll6 = clk_get(NULL, "pll6");
+ if (IS_ERR(pll6)) {
+ dev_err(&pdev->dev, "Couldn't get PLL6\n");
+ clk_put(mux);
+ return PTR_ERR(pll6);
+ }
+
+ ret = clk_set_parent(mux, pll6);
+ clk_put(pll6);
+ clk_put(mux);
+
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't reparent AHB1 on PLL6\n");
+ return ret;
+ }
+
+ sdc->rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(sdc->rstc)) {
+ dev_err(&pdev->dev, "No reset controller specified\n");
+ return PTR_ERR(sdc->rstc);
+ }
+
+ sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
+ sizeof(struct sun6i_dma_lli), 4, 0);
+ if (!sdc->pool) {
+ dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, sdc);
+ INIT_LIST_HEAD(&sdc->pending);
+ spin_lock_init(&sdc->lock);
+
+ dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask);
+ dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);
+
+ INIT_LIST_HEAD(&sdc->slave.channels);
+ sdc->slave.device_alloc_chan_resources = sun6i_dma_alloc_chan_resources;
+ sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources;
+ sdc->slave.device_tx_status = sun6i_dma_tx_status;
+ sdc->slave.device_issue_pending = sun6i_dma_issue_pending;
+ sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg;
+ sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy;
+ sdc->slave.device_control = sun6i_dma_control;
+ sdc->slave.chancnt = NR_MAX_VCHANS;
+
+ sdc->slave.dev = &pdev->dev;
+
+ sdc->pchans = devm_kcalloc(&pdev->dev, NR_MAX_CHANNELS,
+ sizeof(struct sun6i_pchan), GFP_KERNEL);
+ if (!sdc->pchans)
+ return -ENOMEM;
+
+ sdc->vchans = devm_kcalloc(&pdev->dev, NR_MAX_VCHANS,
+ sizeof(struct sun6i_vchan), GFP_KERNEL);
+ if (!sdc->vchans)
+ return -ENOMEM;
+
+ tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);
+
+ for (i = 0; i < NR_MAX_CHANNELS; i++) {
+ struct sun6i_pchan *pchan = &sdc->pchans[i];
+
+ pchan->idx = i;
+ pchan->base = sdc->base + 0x100 + i * 0x40;
+ }
+
+ for (i = 0; i < NR_MAX_VCHANS; i++) {
+ struct sun6i_vchan *vchan = &sdc->vchans[i];
+
+ INIT_LIST_HEAD(&vchan->node);
+ vchan->vc.desc_free = sun6i_dma_free_desc;
+ vchan_init(&vchan->vc, &sdc->slave);
+ }
+
+ ret = reset_control_deassert(sdc->rstc);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't deassert the device from reset\n");
+ goto err_chan_free;
+ }
+
+ ret = clk_prepare_enable(sdc->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable the clock\n");
+ goto err_reset_assert;
+ }
+
+ ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0,
+ dev_name(&pdev->dev), sdc);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot request IRQ\n");
+ goto err_clk_disable;
+ }
+
+ ret = dma_async_device_register(&sdc->slave);
+ if (ret) {
+ dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
+ goto err_irq_disable;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node, sun6i_dma_of_xlate,
+ sdc);
+ if (ret) {
+ dev_err(&pdev->dev, "of_dma_controller_register failed\n");
+ goto err_dma_unregister;
+ }
+
+ return 0;
+
+err_dma_unregister:
+ dma_async_device_unregister(&sdc->slave);
+err_irq_disable:
+ sun6i_kill_tasklet(sdc);
+err_clk_disable:
+ clk_disable_unprepare(sdc->clk);
+err_reset_assert:
+ reset_control_assert(sdc->rstc);
+err_chan_free:
+ sun6i_dma_free(sdc);
+ return ret;
+}
+
+static int sun6i_dma_remove(struct platform_device *pdev)
+{
+ struct sun6i_dma_dev *sdc = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&sdc->slave);
+
+ sun6i_kill_tasklet(sdc);
+
+ clk_disable_unprepare(sdc->clk);
+ reset_control_assert(sdc->rstc);
+
+ sun6i_dma_free(sdc);
+
+ return 0;
+}
+
+static const struct of_device_id sun6i_dma_match[] = {
+ { .compatible = "allwinner,sun6i-a31-dma" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sun6i_dma_match);
+
+static struct platform_driver sun6i_dma_driver = {
+ .probe = sun6i_dma_probe,
+ .remove = sun6i_dma_remove,
+ .driver = {
+ .name = "sun6i-dma",
+ .of_match_table = sun6i_dma_match,
+ },
+};
+module_platform_driver(sun6i_dma_driver);
+
+MODULE_DESCRIPTION("Allwinner A31 DMA Controller Driver");
+MODULE_AUTHOR("Sugar <shuge@allwinnertech.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_LICENSE("GPL");
static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
struct tegra_dma_desc *dma_desc = NULL;
--- /dev/null
+/*
+ * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
+ * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef DT_BINDINGS_NBPFAXI_H
+#define DT_BINDINGS_NBPFAXI_H
+
+/**
+ * Use "#dma-cells = <2>;" with the second integer defining slave DMA flags:
+ */
+#define NBPF_SLAVE_RQ_HIGH 1
+#define NBPF_SLAVE_RQ_LOW 2
+#define NBPF_SLAVE_RQ_LEVEL 4
+
+#endif
*/
typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE 1
-#define DMA_MAX_COOKIE INT_MAX
static inline int dma_submit_error(dma_cookie_t cookie)
{
struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context);
+ unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
struct dma_chan *chan, struct dma_interleaved_template *xt,
unsigned long flags);
unsigned long flags)
{
return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
- period_len, dir, flags, NULL);
+ period_len, dir, flags);
}
static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
const char *name);
extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma);
+extern struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma);
#else
static inline int of_dma_controller_register(struct device_node *np,
struct dma_chan *(*of_dma_xlate)
return NULL;
}
+#define of_dma_xlate_by_chan_id NULL
+
#endif
#endif /* __LINUX_OF_DMA_H */
IMX_DMATYPE_ASRC, /* ASRC */
IMX_DMATYPE_ESAI, /* ESAI */
IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
+ IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
};
enum imx_dma_prio {
void edma_pause(unsigned channel);
void edma_resume(unsigned channel);
+void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no);
+
struct edma_rsv_info {
const s16 (*rsv_chans)[2];
};
/* DMAOR definitions */
-#define DMAOR_AE 0x00000004
+#define DMAOR_AE 0x00000004 /* Address Error Flag */
#define DMAOR_NMIF 0x00000002
-#define DMAOR_DME 0x00000001
+#define DMAOR_DME 0x00000001 /* DMA Master Enable */
/* Definitions for the SuperH DMAC */
-#define DM_INC 0x00004000
-#define DM_DEC 0x00008000
-#define DM_FIX 0x0000c000
-#define SM_INC 0x00001000
-#define SM_DEC 0x00002000
-#define SM_FIX 0x00003000
-#define CHCR_DE 0x00000001
-#define CHCR_TE 0x00000002
-#define CHCR_IE 0x00000004
+#define DM_INC 0x00004000 /* Destination addresses are incremented */
+#define DM_DEC 0x00008000 /* Destination addresses are decremented */
+#define DM_FIX 0x0000c000 /* Destination address is fixed */
+#define SM_INC 0x00001000 /* Source addresses are incremented */
+#define SM_DEC 0x00002000 /* Source addresses are decremented */
+#define SM_FIX 0x00003000 /* Source address is fixed */
+#define RS_AUTO 0x00000400 /* Auto Request */
+#define RS_ERS 0x00000800 /* DMA extended resource selector */
+#define CHCR_DE 0x00000001 /* DMA Enable */
+#define CHCR_TE 0x00000002 /* Transfer End Flag */
+#define CHCR_IE 0x00000004 /* Interrupt Enable */
#endif