Merge tag 'spi-fix-v4.9-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi
[cascardo/linux.git] / drivers / net / ethernet / stmicro / stmmac / dwmac4_descs.c
1 /*
2  * This contains the functions to handle the descriptors for DesignWare databook
3  * 4.xx.
4  *
5  * Copyright (C) 2015  STMicroelectronics Ltd
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * Author: Alexandre Torgue <alexandre.torgue@st.com>
12  */
13
14 #include <linux/stmmac.h>
15 #include "common.h"
16 #include "dwmac4_descs.h"
17
/* Decode the write-back Tx status word (TDES3) of a completed descriptor.
 *
 * @data:   opaque pointer, actually the interface's struct net_device_stats
 * @x:      driver extra statistics to update on errors
 * @p:      descriptor in write-back format
 * @ioaddr: MAC register base (unused here; required by the common ops API)
 *
 * Return: tx_dma_own while the DMA still owns the descriptor, tx_not_ls
 * when this is not the frame's last segment, tx_err when the hardware
 * reported an error summary, tx_done otherwise.
 */
static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
                                       struct dma_desc *p,
                                       void __iomem *ioaddr)
{
        struct net_device_stats *stats = (struct net_device_stats *)data;
        unsigned int tdes3;
        int ret = tx_done;

        tdes3 = p->des3;

        /* Get tx owner first */
        if (unlikely(tdes3 & TDES3_OWN))
                return tx_dma_own;

        /* Verify tx error by looking at the last segment. */
        if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR)))
                return tx_not_ls;

        if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
                /* Break the error summary down into individual counters. */
                if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
                        x->tx_jabber++;
                if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
                        x->tx_frame_flushed++;
                if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) {
                        x->tx_losscarrier++;
                        stats->tx_carrier_errors++;
                }
                if (unlikely(tdes3 & TDES3_NO_CARRIER)) {
                        x->tx_carrier++;
                        stats->tx_carrier_errors++;
                }
                /* Late/excessive collisions: add the hardware's collision
                 * count for this frame to the interface counter.
                 */
                if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
                             (tdes3 & TDES3_EXCESSIVE_COLLISION)))
                        stats->collisions +=
                            (tdes3 & TDES3_COLLISION_COUNT_MASK)
                            >> TDES3_COLLISION_COUNT_SHIFT;

                if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
                        x->tx_deferred++;

                if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR))
                        x->tx_underflow++;

                if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
                        x->tx_ip_header_error++;

                if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
                        x->tx_payload_error++;

                ret = tx_err;
        }

        /* A deferred transmission is counted even without an error summary. */
        if (unlikely(tdes3 & TDES3_DEFERRED))
                x->tx_deferred++;

        return ret;
}
75
/* Decode the write-back Rx status words (RDES1/RDES2/RDES3) of a received
 * descriptor and update the software counters accordingly.
 *
 * @data: opaque pointer, actually the interface's struct net_device_stats
 * @x:    driver extra statistics block
 * @p:    descriptor in write-back format
 *
 * Return: dma_own while the DMA still owns the descriptor, discard_frame
 * on any reported error / filter failure (or when this is not the last
 * segment), good_frame otherwise.
 */
static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
                                       struct dma_desc *p)
{
        struct net_device_stats *stats = (struct net_device_stats *)data;
        unsigned int rdes1 = p->des1;
        unsigned int rdes2 = p->des2;
        unsigned int rdes3 = p->des3;
        int message_type;
        int ret = good_frame;

        if (unlikely(rdes3 & RDES3_OWN))
                return dma_own;

        /* Verify rx error by looking at the last segment. */
        if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
                return discard_frame;

        if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
                /* Break the error summary down into individual counters. */
                if (unlikely(rdes3 & RDES3_GIANT_PACKET))
                        stats->rx_length_errors++;
                if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
                        x->rx_gmac_overflow++;

                if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG))
                        x->rx_watchdog++;

                if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
                        x->rx_mii++;

                if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
                        x->rx_crc++;
                        stats->rx_crc_errors++;
                }

                if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
                        x->dribbling_bit++;

                ret = discard_frame;
        }

        /* PTP message type field carried in RDES1 */
        message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;

        if (rdes1 & RDES1_IP_HDR_ERROR)
                x->ip_hdr_err++;
        if (rdes1 & RDES1_IP_CSUM_BYPASSED)
                x->ip_csum_bypassed++;
        if (rdes1 & RDES1_IPV4_HEADER)
                x->ipv4_pkt_rcvd++;
        if (rdes1 & RDES1_IPV6_HEADER)
                x->ipv6_pkt_rcvd++;
        if (message_type == RDES_EXT_SYNC)
                x->rx_msg_type_sync++;
        else if (message_type == RDES_EXT_FOLLOW_UP)
                x->rx_msg_type_follow_up++;
        else if (message_type == RDES_EXT_DELAY_REQ)
                x->rx_msg_type_delay_req++;
        else if (message_type == RDES_EXT_DELAY_RESP)
                x->rx_msg_type_delay_resp++;
        else if (message_type == RDES_EXT_PDELAY_REQ)
                x->rx_msg_type_pdelay_req++;
        else if (message_type == RDES_EXT_PDELAY_RESP)
                x->rx_msg_type_pdelay_resp++;
        else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
                x->rx_msg_type_pdelay_follow_up++;
        else
                x->rx_msg_type_ext_no_ptp++;

        if (rdes1 & RDES1_PTP_PACKET_TYPE)
                x->ptp_frame_type++;
        if (rdes1 & RDES1_PTP_VER)
                x->ptp_ver++;
        if (rdes1 & RDES1_TIMESTAMP_DROPPED)
                x->timestamp_dropped++;

        /* MAC address filter failures also cause the frame to be dropped. */
        if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) {
                x->sa_rx_filter_fail++;
                ret = discard_frame;
        }
        if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) {
                x->da_rx_filter_fail++;
                ret = discard_frame;
        }

        if (rdes2 & RDES2_L3_FILTER_MATCH)
                x->l3_filter_match++;
        if (rdes2 & RDES2_L4_FILTER_MATCH)
                x->l4_filter_match++;
        /* NOTE(review): this bumps the "no_match" counter when the matched
         * filter number field is NON-zero; the naming looks inverted —
         * confirm against the databook before changing.
         */
        if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
            >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT)
                x->l3_l4_filter_no_match++;

        return ret;
}
169
170 static int dwmac4_rd_get_tx_len(struct dma_desc *p)
171 {
172         return (p->des2 & TDES2_BUFFER1_SIZE_MASK);
173 }
174
175 static int dwmac4_get_tx_owner(struct dma_desc *p)
176 {
177         return (p->des3 & TDES3_OWN) >> TDES3_OWN_SHIFT;
178 }
179
180 static void dwmac4_set_tx_owner(struct dma_desc *p)
181 {
182         p->des3 |= TDES3_OWN;
183 }
184
185 static void dwmac4_set_rx_owner(struct dma_desc *p)
186 {
187         p->des3 |= RDES3_OWN;
188 }
189
190 static int dwmac4_get_tx_ls(struct dma_desc *p)
191 {
192         return (p->des3 & TDES3_LAST_DESCRIPTOR) >> TDES3_LAST_DESCRIPTOR_SHIFT;
193 }
194
195 static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
196 {
197         return (p->des3 & RDES3_PACKET_SIZE_MASK);
198 }
199
200 static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
201 {
202         p->des2 |= TDES2_TIMESTAMP_ENABLE;
203 }
204
205 static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
206 {
207         return (p->des3 & TDES3_TIMESTAMP_STATUS)
208                 >> TDES3_TIMESTAMP_STATUS_SHIFT;
209 }
210
211 /*  NOTE: For RX CTX bit has to be checked before
212  *  HAVE a specific function for TX and another one for RX
213  */
214 static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
215 {
216         struct dma_desc *p = (struct dma_desc *)desc;
217         u64 ns;
218
219         ns = p->des0;
220         /* convert high/sec time stamp value to nanosecond */
221         ns += p->des1 * 1000000000ULL;
222
223         return ns;
224 }
225
226 static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats)
227 {
228         struct dma_desc *p = (struct dma_desc *)desc;
229
230         return (p->des1 & RDES1_TIMESTAMP_AVAILABLE)
231                 >> RDES1_TIMESTAMP_AVAILABLE_SHIFT;
232 }
233
234 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
235                                    int mode, int end)
236 {
237         p->des3 = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR;
238
239         if (!disable_rx_ic)
240                 p->des3 |= RDES3_INT_ON_COMPLETION_EN;
241 }
242
/* Reset a Tx descriptor to a clean, driver-owned state (all words zero).
 * mode/end are unused by the 4.xx descriptor layout.
 */
static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
{
        p->des0 = 0;
        p->des1 = 0;
        p->des2 = 0;
        p->des3 = 0;
}
250
251 static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
252                                       bool csum_flag, int mode, bool tx_own,
253                                       bool ls)
254 {
255         unsigned int tdes3 = p->des3;
256
257         p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK);
258
259         if (is_fs)
260                 tdes3 |= TDES3_FIRST_DESCRIPTOR;
261         else
262                 tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
263
264         if (likely(csum_flag))
265                 tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
266         else
267                 tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
268
269         if (ls)
270                 tdes3 |= TDES3_LAST_DESCRIPTOR;
271         else
272                 tdes3 &= ~TDES3_LAST_DESCRIPTOR;
273
274         /* Finally set the OWN bit. Later the DMA will start! */
275         if (tx_own)
276                 tdes3 |= TDES3_OWN;
277
278         if (is_fs & tx_own)
279                 /* When the own bit, for the first frame, has to be set, all
280                  * descriptors for the same frame has to be set before, to
281                  * avoid race condition.
282                  */
283                 wmb();
284
285         p->des3 = tdes3;
286 }
287
288 static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
289                                           int len1, int len2, bool tx_own,
290                                           bool ls, unsigned int tcphdrlen,
291                                           unsigned int tcppayloadlen)
292 {
293         unsigned int tdes3 = p->des3;
294
295         if (len1)
296                 p->des2 |= (len1 & TDES2_BUFFER1_SIZE_MASK);
297
298         if (len2)
299                 p->des2 |= (len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
300                             & TDES2_BUFFER2_SIZE_MASK;
301
302         if (is_fs) {
303                 tdes3 |= TDES3_FIRST_DESCRIPTOR |
304                          TDES3_TCP_SEGMENTATION_ENABLE |
305                          ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
306                           TDES3_SLOT_NUMBER_MASK) |
307                          ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
308         } else {
309                 tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
310         }
311
312         if (ls)
313                 tdes3 |= TDES3_LAST_DESCRIPTOR;
314         else
315                 tdes3 &= ~TDES3_LAST_DESCRIPTOR;
316
317         /* Finally set the OWN bit. Later the DMA will start! */
318         if (tx_own)
319                 tdes3 |= TDES3_OWN;
320
321         if (is_fs & tx_own)
322                 /* When the own bit, for the first frame, has to be set, all
323                  * descriptors for the same frame has to be set before, to
324                  * avoid race condition.
325                  */
326                 wmb();
327
328         p->des3 = tdes3;
329 }
330
/* Clean a completed Tx descriptor for reuse: clear the control/length word
 * (des2) and the status word (des3); des0/des1 are left untouched.
 * mode is unused by the 4.xx descriptor layout.
 */
static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
{
        p->des2 = 0;
        p->des3 = 0;
}
336
337 static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
338 {
339         p->des2 |= TDES2_INTERRUPT_ON_COMPLETION;
340 }
341
342 static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
343 {
344         struct dma_desc *p = (struct dma_desc *)head;
345         int i;
346
347         pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
348
349         for (i = 0; i < size; i++) {
350                 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
351                         i, (unsigned int)virt_to_phys(p),
352                         p->des0, p->des1, p->des2, p->des3);
353                 p++;
354         }
355 }
356
/* Program a context descriptor carrying the TCP MSS for TSO. */
static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
{
        p->des0 = 0;
        p->des1 = 0;
        p->des2 = mss;
        /* Mark it as a context descriptor with a valid MSS field (TCMSSV) */
        p->des3 = TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV;
}
364
365 const struct stmmac_desc_ops dwmac4_desc_ops = {
366         .tx_status = dwmac4_wrback_get_tx_status,
367         .rx_status = dwmac4_wrback_get_rx_status,
368         .get_tx_len = dwmac4_rd_get_tx_len,
369         .get_tx_owner = dwmac4_get_tx_owner,
370         .set_tx_owner = dwmac4_set_tx_owner,
371         .set_rx_owner = dwmac4_set_rx_owner,
372         .get_tx_ls = dwmac4_get_tx_ls,
373         .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
374         .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
375         .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
376         .get_timestamp = dwmac4_wrback_get_timestamp,
377         .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status,
378         .set_tx_ic = dwmac4_rd_set_tx_ic,
379         .prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
380         .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
381         .release_tx_desc = dwmac4_release_tx_desc,
382         .init_rx_desc = dwmac4_rd_init_rx_desc,
383         .init_tx_desc = dwmac4_rd_init_tx_desc,
384         .display_ring = dwmac4_display_ring,
385         .set_mss = dwmac4_set_mss_ctxt,
386 };
387
388 const struct stmmac_mode_ops dwmac4_ring_mode_ops = { };