drivers/net/ethernet/amd/xgbe/xgbe-drv.c
/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>

#include "xgbe.h"
#include "xgbe-common.h"

static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_set_rx_mode(struct net_device *);

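/* Allocate one xgbe_channel per DMA channel, sized to the larger of the
 * Tx and Rx ring counts.  The channel array and the Tx/Rx ring arrays
 * are three separate allocations; rings are then handed out to the first
 * tx_ring_count/rx_ring_count channels, so a channel may end up owning a
 * Tx ring, an Rx ring, or both.
 */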
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel_mem, *channel;
        struct xgbe_ring *tx_ring, *rx_ring;
        unsigned int count, i;
        int ret = -ENOMEM;

        count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);

        channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
        if (!channel_mem)
                goto err_channel;

        tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
                          GFP_KERNEL);
        if (!tx_ring)
                goto err_tx_ring;

        rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
                          GFP_KERNEL);
        if (!rx_ring)
                goto err_rx_ring;

        for (i = 0, channel = channel_mem; i < count; i++, channel++) {
                snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
                channel->pdata = pdata;
                channel->queue_index = i;
                channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
                                    (DMA_CH_INC * i);

                if (pdata->per_channel_irq) {
                        /* Get the DMA interrupt (offset 1) */
                        ret = platform_get_irq(pdata->pdev, i + 1);
                        if (ret < 0) {
                                netdev_err(pdata->netdev,
                                           "platform_get_irq %u failed\n",
                                           i + 1);
                                goto err_irq;
                        }

                        channel->dma_irq = ret;
                }

                if (i < pdata->tx_ring_count) {
                        spin_lock_init(&tx_ring->lock);
                        channel->tx_ring = tx_ring++;
                }

                if (i < pdata->rx_ring_count) {
                        spin_lock_init(&rx_ring->lock);
                        channel->rx_ring = rx_ring++;
                }

                DBGPR("  %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
                      channel->name, channel->queue_index, channel->dma_regs,
                      channel->dma_irq, channel->tx_ring, channel->rx_ring);
        }

        pdata->channel = channel_mem;
        pdata->channel_count = count;

        return 0;

err_irq:
        kfree(rx_ring);

err_rx_ring:
        kfree(tx_ring);

err_tx_ring:
        kfree(channel_mem);

err_channel:
        return ret;
}

static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
        if (!pdata->channel)
                return;

        kfree(pdata->channel->rx_ring);
        kfree(pdata->channel->tx_ring);
        kfree(pdata->channel);

        pdata->channel = NULL;
        pdata->channel_count = 0;
}

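/* cur and dirty are free-running unsigned counters, so (cur - dirty) is
 * the number of in-use descriptors even after either counter wraps.
 * Example: with rdesc_count = 512, cur = 700 and dirty = 650 leave
 * 512 - (700 - 650) = 462 descriptors available.
 */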
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
        return (ring->rdesc_count - (ring->cur - ring->dirty));
}

static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
                                    struct xgbe_ring *ring, unsigned int count)
{
        struct xgbe_prv_data *pdata = channel->pdata;

        if (count > xgbe_tx_avail_desc(ring)) {
                DBGPR("  Tx queue stopped, not enough descriptors available\n");
                netif_stop_subqueue(pdata->netdev, channel->queue_index);
                ring->tx.queue_stopped = 1;

                /* If we haven't notified the hardware because of xmit_more
                 * support, tell it now
                 */
                if (ring->tx.xmit_more)
                        pdata->hw_if.tx_start_xmit(channel, ring);

                return NETDEV_TX_BUSY;
        }

        return 0;
}

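/* Size the Rx buffer for the MTU plus Ethernet, FCS and VLAN overhead,
 * clamp it between the hardware minimum and one page, then round up to
 * the buffer alignment.  For example, assuming XGBE_RX_BUF_ALIGN is 64
 * (see xgbe.h), an MTU of 1500 gives 1500 + 14 + 4 + 4 = 1522 bytes,
 * which rounds up to a 1536-byte buffer.
 */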
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
        unsigned int rx_buf_size;

        if (mtu > XGMAC_JUMBO_PACKET_MTU) {
                netdev_alert(netdev, "MTU exceeds maximum supported value\n");
                return -EINVAL;
        }

        rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

        rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
                      ~(XGBE_RX_BUF_ALIGN - 1);

        return rx_buf_size;
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        enum xgbe_int int_id;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (channel->tx_ring && channel->rx_ring)
                        int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
                else if (channel->tx_ring)
                        int_id = XGMAC_INT_DMA_CH_SR_TI;
                else if (channel->rx_ring)
                        int_id = XGMAC_INT_DMA_CH_SR_RI;
                else
                        continue;

                hw_if->enable_int(channel, int_id);
        }
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        enum xgbe_int int_id;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (channel->tx_ring && channel->rx_ring)
                        int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
                else if (channel->tx_ring)
                        int_id = XGMAC_INT_DMA_CH_SR_TI;
                else if (channel->rx_ring)
                        int_id = XGMAC_INT_DMA_CH_SR_RI;
                else
                        continue;

                hw_if->disable_int(channel, int_id);
        }
}

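/* Top-level interrupt handler used when all DMA channels share one IRQ.
 * It walks DMA_ISR, schedules NAPI for Tx/Rx work (with further Tx/Rx
 * interrupts masked until the poll completes), kicks the restart worker
 * on a fatal bus error, and services MAC/MMC/timestamp interrupts.
 */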
static irqreturn_t xgbe_isr(int irq, void *data)
{
        struct xgbe_prv_data *pdata = data;
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        unsigned int dma_isr, dma_ch_isr;
        unsigned int mac_isr, mac_tssr;
        unsigned int i;

        /* The DMA interrupt status register also reports MAC and MTL
         * interrupts. So for polling mode, we just need to check for
         * this register to be non-zero
         */
        dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
        if (!dma_isr)
                goto isr_done;

        DBGPR("  DMA_ISR = %08x\n", dma_isr);

        for (i = 0; i < pdata->channel_count; i++) {
                if (!(dma_isr & (1 << i)))
                        continue;

                channel = pdata->channel + i;

                dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
                DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);

                /* If we get a TI or RI interrupt that means per channel DMA
                 * interrupts are not enabled, so we use the private data napi
                 * structure, not the per channel napi structure
                 */
                if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
                    XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
                        if (napi_schedule_prep(&pdata->napi)) {
                                /* Disable Tx and Rx interrupts */
                                xgbe_disable_rx_tx_ints(pdata);

                                /* Turn on polling */
                                __napi_schedule(&pdata->napi);
                        }
                }

                /* Restart the device on a Fatal Bus Error */
                if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
                        schedule_work(&pdata->restart_work);

                /* Clear all interrupt signals */
                XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
        }

        if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
                mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

                if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
                        hw_if->tx_mmc_int(pdata);

                if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
                        hw_if->rx_mmc_int(pdata);

                if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
                        mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

                        if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
                                /* Read Tx Timestamp to clear interrupt */
                                pdata->tx_tstamp =
                                        hw_if->get_tx_tstamp(pdata);
                                schedule_work(&pdata->tx_tstamp_work);
                        }
                }
        }

        DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));

isr_done:
        return IRQ_HANDLED;
}

static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
        struct xgbe_channel *channel = data;

        /* Per channel DMA interrupts are enabled, so we use the per
         * channel napi structure and not the private data napi structure
         */
        if (napi_schedule_prep(&channel->napi)) {
                /* Disable Tx and Rx interrupts */
                disable_irq_nosync(channel->dma_irq);

                /* Turn on polling */
                __napi_schedule(&channel->napi);
        }

        return IRQ_HANDLED;
}

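/* Tx coalescing timer, armed from the descriptor path: when it fires,
 * pending Tx completions are handled by scheduling the appropriate NAPI
 * instance (per-channel or shared), mirroring the interrupt handlers.
 */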
static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
{
        struct xgbe_channel *channel = container_of(timer,
                                                    struct xgbe_channel,
                                                    tx_timer);
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_prv_data *pdata = channel->pdata;
        struct napi_struct *napi;
        unsigned long flags;

        DBGPR("-->xgbe_tx_timer\n");

        napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

        spin_lock_irqsave(&ring->lock, flags);

        if (napi_schedule_prep(napi)) {
                /* Disable Tx and Rx interrupts */
                if (pdata->per_channel_irq)
                        disable_irq(channel->dma_irq);
                else
                        xgbe_disable_rx_tx_ints(pdata);

                /* Turn on polling */
                __napi_schedule(napi);
        }

        channel->tx_timer_active = 0;

        spin_unlock_irqrestore(&ring->lock, flags);

        DBGPR("<--xgbe_tx_timer\n");

        return HRTIMER_NORESTART;
}

static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        DBGPR("-->xgbe_init_tx_timers\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                DBGPR("  %s adding tx timer\n", channel->name);
                hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
                             HRTIMER_MODE_REL);
                channel->tx_timer.function = xgbe_tx_timer;
        }

        DBGPR("<--xgbe_init_tx_timers\n");
}

static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        DBGPR("-->xgbe_stop_tx_timers\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                DBGPR("  %s deleting tx timer\n", channel->name);
                channel->tx_timer_active = 0;
                hrtimer_cancel(&channel->tx_timer);
        }

        DBGPR("<--xgbe_stop_tx_timers\n");
}

void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
        unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
        struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

        DBGPR("-->xgbe_get_all_hw_features\n");

        mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
        mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
        mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

        memset(hw_feat, 0, sizeof(*hw_feat));

        hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

        /* Hardware feature register 0 */
        hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
        hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
        hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
        hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
        hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
        hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
        hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
        hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
        hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
        hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
        hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
        hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
                                              ADDMACADRSEL);
        hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
        hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

        /* Hardware feature register 1 */
        hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                RXFIFOSIZE);
        hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                TXFIFOSIZE);
        hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
        hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
        hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
        hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
        hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
        hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                  HASHTBLSZ);
        hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                  L3L4FNUM);

        /* Hardware feature register 2 */
        hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
        hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
        hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
        hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
        hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
        hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

        /* Translate the Hash Table size into actual number */
        switch (hw_feat->hash_table_size) {
        case 0:
                break;
        case 1:
                hw_feat->hash_table_size = 64;
                break;
        case 2:
                hw_feat->hash_table_size = 128;
                break;
        case 3:
                hw_feat->hash_table_size = 256;
                break;
        }

        /* The Queue and Channel counts are zero based so increment them
         * to get the actual number
         */
        hw_feat->rx_q_cnt++;
        hw_feat->tx_q_cnt++;
        hw_feat->rx_ch_cnt++;
        hw_feat->tx_ch_cnt++;

        DBGPR("<--xgbe_get_all_hw_features\n");
}

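/* NAPI instances mirror the interrupt layout: one per channel when
 * per-channel DMA interrupts are available, otherwise a single instance
 * in the private data.  'add'/'del' control whether the instance is also
 * (un)registered with the stack, so a powerdown/powerup cycle can keep
 * the registration while open/close tears it down.
 */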
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
        struct xgbe_channel *channel;
        unsigned int i;

        if (pdata->per_channel_irq) {
                channel = pdata->channel;
                for (i = 0; i < pdata->channel_count; i++, channel++) {
                        if (add)
                                netif_napi_add(pdata->netdev, &channel->napi,
                                               xgbe_one_poll, NAPI_POLL_WEIGHT);

                        napi_enable(&channel->napi);
                }
        } else {
                if (add)
                        netif_napi_add(pdata->netdev, &pdata->napi,
                                       xgbe_all_poll, NAPI_POLL_WEIGHT);

                napi_enable(&pdata->napi);
        }
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
        struct xgbe_channel *channel;
        unsigned int i;

        if (pdata->per_channel_irq) {
                channel = pdata->channel;
                for (i = 0; i < pdata->channel_count; i++, channel++) {
                        napi_disable(&channel->napi);

                        if (del)
                                netif_napi_del(&channel->napi);
                }
        } else {
                napi_disable(&pdata->napi);

                if (del)
                        netif_napi_del(&pdata->napi);
        }
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;

        DBGPR("-->xgbe_init_tx_coalesce\n");

        pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
        pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

        hw_if->config_tx_coalesce(pdata);

        DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;

        DBGPR("-->xgbe_init_rx_coalesce\n");

        pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
        pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

        hw_if->config_rx_coalesce(pdata);

        DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_data *rdata;
        unsigned int i, j;

        DBGPR("-->xgbe_free_tx_data\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                ring = channel->tx_ring;
                if (!ring)
                        break;

                for (j = 0; j < ring->rdesc_count; j++) {
                        rdata = XGBE_GET_DESC_DATA(ring, j);
                        desc_if->unmap_rdata(pdata, rdata);
                }
        }

        DBGPR("<--xgbe_free_tx_data\n");
}

static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_data *rdata;
        unsigned int i, j;

        DBGPR("-->xgbe_free_rx_data\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                ring = channel->rx_ring;
                if (!ring)
                        break;

                for (j = 0; j < ring->rdesc_count; j++) {
                        rdata = XGBE_GET_DESC_DATA(ring, j);
                        desc_if->unmap_rdata(pdata, rdata);
                }
        }

        DBGPR("<--xgbe_free_rx_data\n");
}

static void xgbe_adjust_link(struct net_device *netdev)
{
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct phy_device *phydev = pdata->phydev;
        int new_state = 0;

        if (!phydev)
                return;

        if (phydev->link) {
                /* Flow control support */
                if (pdata->pause_autoneg) {
                        if (phydev->pause || phydev->asym_pause) {
                                pdata->tx_pause = 1;
                                pdata->rx_pause = 1;
                        } else {
                                pdata->tx_pause = 0;
                                pdata->rx_pause = 0;
                        }
                }

                if (pdata->tx_pause != pdata->phy_tx_pause) {
                        hw_if->config_tx_flow_control(pdata);
                        pdata->phy_tx_pause = pdata->tx_pause;
                }

                if (pdata->rx_pause != pdata->phy_rx_pause) {
                        hw_if->config_rx_flow_control(pdata);
                        pdata->phy_rx_pause = pdata->rx_pause;
                }

                /* Speed support */
                if (phydev->speed != pdata->phy_speed) {
                        new_state = 1;

                        switch (phydev->speed) {
                        case SPEED_10000:
                                hw_if->set_xgmii_speed(pdata);
                                break;

                        case SPEED_2500:
                                hw_if->set_gmii_2500_speed(pdata);
                                break;

                        case SPEED_1000:
                                hw_if->set_gmii_speed(pdata);
                                break;
                        }
                        pdata->phy_speed = phydev->speed;
                }

                if (phydev->link != pdata->phy_link) {
                        new_state = 1;
                        pdata->phy_link = 1;
                }
        } else if (pdata->phy_link) {
                new_state = 1;
                pdata->phy_link = 0;
                pdata->phy_speed = SPEED_UNKNOWN;
        }

        if (new_state)
                phy_print_status(phydev);
}

static int xgbe_phy_init(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;
        struct phy_device *phydev = pdata->phydev;
        int ret;

        pdata->phy_link = -1;
        pdata->phy_speed = SPEED_UNKNOWN;
        pdata->phy_tx_pause = pdata->tx_pause;
        pdata->phy_rx_pause = pdata->rx_pause;

        ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
                                 pdata->phy_mode);
        if (ret) {
                netdev_err(netdev, "phy_connect_direct failed\n");
                return ret;
        }

        if (!phydev->drv || (phydev->drv->phy_id == 0)) {
                netdev_err(netdev, "phy_id not valid\n");
                ret = -ENODEV;
                goto err_phy_connect;
        }
        DBGPR("  phy_connect_direct succeeded for PHY %s, link=%d\n",
              dev_name(&phydev->dev), phydev->link);

        return 0;

err_phy_connect:
        phy_disconnect(phydev);

        return ret;
}

static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
{
        if (!pdata->phydev)
                return;

        phy_disconnect(pdata->phydev);
}

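/* Power management entry points.  Powerdown quiesces the PHY, queues and
 * NAPI and stops the Tx/Rx blocks without releasing rings or interrupts,
 * so powerup can restore the device without a full close/open cycle.
 * 'caller' distinguishes ioctl requests from internal driver use.
 */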
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        unsigned long flags;

        DBGPR("-->xgbe_powerdown\n");

        if (!netif_running(netdev) ||
            (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
                netdev_alert(netdev, "Device is already powered down\n");
                DBGPR("<--xgbe_powerdown\n");
                return -EINVAL;
        }

        phy_stop(pdata->phydev);

        spin_lock_irqsave(&pdata->lock, flags);

        if (caller == XGMAC_DRIVER_CONTEXT)
                netif_device_detach(netdev);

        netif_tx_stop_all_queues(netdev);
        xgbe_napi_disable(pdata, 0);

        /* Powerdown Tx/Rx */
        hw_if->powerdown_tx(pdata);
        hw_if->powerdown_rx(pdata);

        pdata->power_down = 1;

        spin_unlock_irqrestore(&pdata->lock, flags);

        DBGPR("<--xgbe_powerdown\n");

        return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        unsigned long flags;

        DBGPR("-->xgbe_powerup\n");

        if (!netif_running(netdev) ||
            (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
                netdev_alert(netdev, "Device is already powered up\n");
                DBGPR("<--xgbe_powerup\n");
                return -EINVAL;
        }

        spin_lock_irqsave(&pdata->lock, flags);

        pdata->power_down = 0;

        phy_start(pdata->phydev);

        /* Enable Tx/Rx */
        hw_if->powerup_tx(pdata);
        hw_if->powerup_rx(pdata);

        if (caller == XGMAC_DRIVER_CONTEXT)
                netif_device_attach(netdev);

        xgbe_napi_enable(pdata, 0);
        netif_tx_start_all_queues(netdev);

        spin_unlock_irqrestore(&pdata->lock, flags);

        DBGPR("<--xgbe_powerup\n");

        return 0;
}

static int xgbe_start(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct net_device *netdev = pdata->netdev;

        DBGPR("-->xgbe_start\n");

        xgbe_set_rx_mode(netdev);

        hw_if->init(pdata);

        phy_start(pdata->phydev);

        hw_if->enable_tx(pdata);
        hw_if->enable_rx(pdata);

        xgbe_init_tx_timers(pdata);

        xgbe_napi_enable(pdata, 1);
        netif_tx_start_all_queues(netdev);

        DBGPR("<--xgbe_start\n");

        return 0;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        struct net_device *netdev = pdata->netdev;
        struct netdev_queue *txq;
        unsigned int i;

        DBGPR("-->xgbe_stop\n");

        phy_stop(pdata->phydev);

        netif_tx_stop_all_queues(netdev);
        xgbe_napi_disable(pdata, 1);

        xgbe_stop_tx_timers(pdata);

        hw_if->disable_tx(pdata);
        hw_if->disable_rx(pdata);

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        continue;

                txq = netdev_get_tx_queue(netdev, channel->queue_index);
                netdev_tx_reset_queue(txq);
        }

        DBGPR("<--xgbe_stop\n");
}

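/* Restart the device in place: stop traffic, wait out any in-flight
 * interrupt handlers, drop all ring state and bring the device back up,
 * optionally issuing a software reset first.  Callers hold the RTNL
 * lock; the restart_work handler below takes it explicitly.
 */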
static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
{
        struct xgbe_channel *channel;
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        unsigned int i;

        DBGPR("-->xgbe_restart_dev\n");

        /* If not running, "restart" will happen on open */
        if (!netif_running(pdata->netdev))
                return;

        xgbe_stop(pdata);
        synchronize_irq(pdata->dev_irq);
        if (pdata->per_channel_irq) {
                channel = pdata->channel;
                for (i = 0; i < pdata->channel_count; i++, channel++)
                        synchronize_irq(channel->dma_irq);
        }

        xgbe_free_tx_data(pdata);
        xgbe_free_rx_data(pdata);

        /* Issue software reset to device if requested */
        if (reset)
                hw_if->exit(pdata);

        xgbe_start(pdata);

        DBGPR("<--xgbe_restart_dev\n");
}

static void xgbe_restart(struct work_struct *work)
{
        struct xgbe_prv_data *pdata = container_of(work,
                                                   struct xgbe_prv_data,
                                                   restart_work);

        rtnl_lock();

        xgbe_restart_dev(pdata, 1);

        rtnl_unlock();
}

static void xgbe_tx_tstamp(struct work_struct *work)
{
        struct xgbe_prv_data *pdata = container_of(work,
                                                   struct xgbe_prv_data,
                                                   tx_tstamp_work);
        struct skb_shared_hwtstamps hwtstamps;
        u64 nsec;
        unsigned long flags;

        /* Only deliver the timestamp if an skb is actually waiting on it */
        if (pdata->tx_tstamp && pdata->tx_tstamp_skb) {
                nsec = timecounter_cyc2time(&pdata->tstamp_tc,
                                            pdata->tx_tstamp);

                memset(&hwtstamps, 0, sizeof(hwtstamps));
                hwtstamps.hwtstamp = ns_to_ktime(nsec);
                skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
        }

        dev_kfree_skb_any(pdata->tx_tstamp_skb);

        spin_lock_irqsave(&pdata->tstamp_lock, flags);
        pdata->tx_tstamp_skb = NULL;
        spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}

static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
                                      struct ifreq *ifreq)
{
        if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
                         sizeof(pdata->tstamp_config)))
                return -EFAULT;

        return 0;
}

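/* Translate a SIOCSHWTSTAMP request into the MAC_TSCR timestamp control
 * bits.  The PTP v2 cases deliberately fall through into their v1
 * counterparts, adding only the version-2 enable bit on top of the
 * shared configuration.
 */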
static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
                                      struct ifreq *ifreq)
{
        struct hwtstamp_config config;
        unsigned int mac_tscr;

        if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
                return -EFAULT;

        if (config.flags)
                return -EINVAL;

        mac_tscr = 0;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
                break;

        case HWTSTAMP_TX_ON:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        default:
                return -ERANGE;
        }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                break;

        case HWTSTAMP_FILTER_ALL:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2, UDP, any kind of event packet */
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                /* Fall through - the v1 case sets the shared bits */
        /* PTP v1, UDP, any kind of event packet */
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2, UDP, Sync packet */
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                /* Fall through - the v1 case sets the shared bits */
        /* PTP v1, UDP, Sync packet */
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2, UDP, Delay_req packet */
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                /* Fall through - the v1 case sets the shared bits */
        /* PTP v1, UDP, Delay_req packet */
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* 802.AS1, Ethernet, any kind of event packet */
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* 802.AS1, Ethernet, Sync packet */
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* 802.AS1, Ethernet, Delay_req packet */
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2/802.AS1, any layer, any kind of event packet */
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2/802.AS1, any layer, Sync packet */
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2/802.AS1, any layer, Delay_req packet */
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        default:
                return -ERANGE;
        }

        pdata->hw_if.config_tstamp(pdata, mac_tscr);

        memcpy(&pdata->tstamp_config, &config, sizeof(config));

        return 0;
}

static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
                                struct sk_buff *skb,
                                struct xgbe_packet_data *packet)
{
        unsigned long flags;

        if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
                spin_lock_irqsave(&pdata->tstamp_lock, flags);
                if (pdata->tx_tstamp_skb) {
                        /* Another timestamp in progress, ignore this one */
                        XGMAC_SET_BITS(packet->attributes,
                                       TX_PACKET_ATTRIBUTES, PTP, 0);
                } else {
                        pdata->tx_tstamp_skb = skb_get(skb);
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                }
                spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
        }

        if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
                skb_tx_timestamp(skb);
}

static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
        if (vlan_tx_tag_present(skb))
                packet->vlan_ctag = vlan_tx_tag_get(skb);
}

static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
        int ret;

        if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                            TSO_ENABLE))
                return 0;

        ret = skb_cow_head(skb, 0);
        if (ret)
                return ret;

        packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        packet->tcp_header_len = tcp_hdrlen(skb);
        packet->tcp_payload_len = skb->len - packet->header_len;
        packet->mss = skb_shinfo(skb)->gso_size;
        DBGPR("  packet->header_len=%u\n", packet->header_len);
        DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
              packet->tcp_header_len, packet->tcp_payload_len);
        DBGPR("  packet->mss=%u\n", packet->mss);

        /* Update the number of packets that will ultimately be transmitted
         * along with the extra bytes for each extra packet
         */
        packet->tx_packets = skb_shinfo(skb)->gso_segs;
        packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;

        return 0;
}

static int xgbe_is_tso(struct sk_buff *skb)
{
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        if (!skb_is_gso(skb))
                return 0;

        DBGPR("  TSO packet to be processed\n");

        return 1;
}

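/* Compute everything dev_xmit() needs before any descriptor is touched:
 * attribute flags (TSO, checksum, VLAN, PTP) and a worst-case descriptor
 * count.  Example: a TSO skb with a new MSS needs one context descriptor
 * and one TSO header descriptor, plus one data descriptor per
 * XGBE_TX_MAX_BUF_SIZE chunk of the linear area and of each fragment.
 */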
static void xgbe_packet_info(struct xgbe_prv_data *pdata,
                             struct xgbe_ring *ring, struct sk_buff *skb,
                             struct xgbe_packet_data *packet)
{
        struct skb_frag_struct *frag;
        unsigned int context_desc;
        unsigned int len;
        unsigned int i;

        packet->skb = skb;

        context_desc = 0;
        packet->rdesc_count = 0;

        packet->tx_packets = 1;
        packet->tx_bytes = skb->len;

        if (xgbe_is_tso(skb)) {
                /* TSO requires an extra descriptor if mss is different */
                if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
                        context_desc = 1;
                        packet->rdesc_count++;
                }

                /* TSO requires an extra descriptor for TSO header */
                packet->rdesc_count++;

                XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                               TSO_ENABLE, 1);
                XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                               CSUM_ENABLE, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                               CSUM_ENABLE, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                /* VLAN requires an extra descriptor if tag is different */
                if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag) {
                        /* We can share with the TSO context descriptor */
                        if (!context_desc) {
                                context_desc = 1;
                                packet->rdesc_count++;
                        }
                }

                XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                               VLAN_CTAG, 1);
        }

        if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
            (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
                XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                               PTP, 1);

        for (len = skb_headlen(skb); len;) {
                packet->rdesc_count++;
                len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                for (len = skb_frag_size(frag); len; ) {
                        packet->rdesc_count++;
                        len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
                }
        }
}

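/* ndo_open: bring-up order is PHY, clocks, Rx buffer sizing, channel and
 * ring allocation, work structs, interrupts, then xgbe_start().  The
 * error unwind below releases these resources in exactly the reverse
 * order.
 */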
static int xgbe_open(struct net_device *netdev)
{
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        struct xgbe_channel *channel = NULL;
        unsigned int i = 0;
        int ret;

        DBGPR("-->xgbe_open\n");

        /* Initialize the phy */
        ret = xgbe_phy_init(pdata);
        if (ret)
                return ret;

        /* Enable the clocks */
        ret = clk_prepare_enable(pdata->sysclk);
        if (ret) {
                netdev_alert(netdev, "dma clk_prepare_enable failed\n");
                goto err_phy_init;
        }

        ret = clk_prepare_enable(pdata->ptpclk);
        if (ret) {
                netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
                goto err_sysclk;
        }

        /* Calculate the Rx buffer size before allocating rings */
        ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
        if (ret < 0)
                goto err_ptpclk;
        pdata->rx_buf_size = ret;

        /* Allocate the channel and ring structures */
        ret = xgbe_alloc_channels(pdata);
        if (ret)
                goto err_ptpclk;

        /* Allocate the ring descriptors and buffers */
        ret = desc_if->alloc_ring_resources(pdata);
        if (ret)
                goto err_channels;

        /* Initialize the device restart and Tx timestamp work struct */
        INIT_WORK(&pdata->restart_work, xgbe_restart);
        INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);

        /* Request interrupts */
        ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
                               netdev->name, pdata);
        if (ret) {
                netdev_alert(netdev, "error requesting irq %d\n",
                             pdata->dev_irq);
                goto err_rings;
        }

        if (pdata->per_channel_irq) {
                channel = pdata->channel;
                for (i = 0; i < pdata->channel_count; i++, channel++) {
                        snprintf(channel->dma_irq_name,
                                 sizeof(channel->dma_irq_name) - 1,
                                 "%s-TxRx-%u", netdev_name(netdev),
                                 channel->queue_index);

                        ret = devm_request_irq(pdata->dev, channel->dma_irq,
                                               xgbe_dma_isr, 0,
                                               channel->dma_irq_name, channel);
                        if (ret) {
                                netdev_alert(netdev,
                                             "error requesting irq %d\n",
                                             channel->dma_irq);
                                goto err_irq;
                        }
                }
        }

        ret = xgbe_start(pdata);
        if (ret)
                goto err_start;

        DBGPR("<--xgbe_open\n");

        return 0;

err_start:
        hw_if->exit(pdata);

err_irq:
        if (pdata->per_channel_irq) {
                /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
                for (i--, channel--; i < pdata->channel_count; i--, channel--)
                        devm_free_irq(pdata->dev, channel->dma_irq, channel);
        }

        devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

err_rings:
        desc_if->free_ring_resources(pdata);

err_channels:
        xgbe_free_channels(pdata);

err_ptpclk:
        clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
        clk_disable_unprepare(pdata->sysclk);

err_phy_init:
        xgbe_phy_exit(pdata);

        return ret;
}

static int xgbe_close(struct net_device *netdev)
{
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        struct xgbe_channel *channel;
        unsigned int i;

        DBGPR("-->xgbe_close\n");

        /* Stop the device */
        xgbe_stop(pdata);

        /* Issue software reset to device */
        hw_if->exit(pdata);

        /* Free the ring descriptors and buffers */
        desc_if->free_ring_resources(pdata);

        /* Release the interrupts */
        devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
        if (pdata->per_channel_irq) {
                channel = pdata->channel;
                for (i = 0; i < pdata->channel_count; i++, channel++)
                        devm_free_irq(pdata->dev, channel->dma_irq, channel);
        }

        /* Free the channel and ring structures */
        xgbe_free_channels(pdata);

        /* Disable the clocks */
        clk_disable_unprepare(pdata->ptpclk);
        clk_disable_unprepare(pdata->sysclk);

        /* Release the phy */
        xgbe_phy_exit(pdata);

        DBGPR("<--xgbe_close\n");

        return 0;
}

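/* ndo_start_xmit: runs under the ring lock.  The packet is sized first
 * so the queue can be stopped before descriptors run out; the skb is
 * then prepared (TSO/VLAN/timestamp), mapped and handed to the hardware
 * via dev_xmit().
 */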
1440 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
1441 {
1442         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1443         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1444         struct xgbe_desc_if *desc_if = &pdata->desc_if;
1445         struct xgbe_channel *channel;
1446         struct xgbe_ring *ring;
1447         struct xgbe_packet_data *packet;
1448         struct netdev_queue *txq;
1449         unsigned long flags;
1450         int ret;
1451
1452         DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
1453
1454         channel = pdata->channel + skb->queue_mapping;
1455         txq = netdev_get_tx_queue(netdev, channel->queue_index);
1456         ring = channel->tx_ring;
1457         packet = &ring->packet_data;
1458
1459         ret = NETDEV_TX_OK;
1460
1461         spin_lock_irqsave(&ring->lock, flags);
1462
1463         if (skb->len == 0) {
1464                 netdev_err(netdev, "empty skb received from stack\n");
1465                 dev_kfree_skb_any(skb);
1466                 goto tx_netdev_return;
1467         }
1468
1469         /* Calculate preliminary packet info */
1470         memset(packet, 0, sizeof(*packet));
1471         xgbe_packet_info(pdata, ring, skb, packet);
1472
1473         /* Check that there are enough descriptors available */
1474         ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
1475         if (ret)
1476                 goto tx_netdev_return;
1477
1478         ret = xgbe_prep_tso(skb, packet);
1479         if (ret) {
1480                 netdev_err(netdev, "error processing TSO packet\n");
1481                 dev_kfree_skb_any(skb);
1482                 goto tx_netdev_return;
1483         }
1484         xgbe_prep_vlan(skb, packet);
1485
1486         if (!desc_if->map_tx_skb(channel, skb)) {
1487                 dev_kfree_skb_any(skb);
1488                 goto tx_netdev_return;
1489         }
1490
1491         xgbe_prep_tx_tstamp(pdata, skb, packet);
1492
1493         /* Report the actual number of bytes (to be) sent to BQL */
1494         netdev_tx_sent_queue(txq, packet->tx_bytes);
1495
1496         /* Configure required descriptor fields for transmission */
1497         hw_if->dev_xmit(channel);
1498
1499 #ifdef XGMAC_ENABLE_TX_PKT_DUMP
1500         xgbe_print_pkt(netdev, skb, true);
1501 #endif
1502
1503         /* Stop the queue in advance if there may not be enough descriptors */
1504         xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
1505
1506         ret = NETDEV_TX_OK;
1507
1508 tx_netdev_return:
1509         spin_unlock_irqrestore(&ring->lock, flags);
1510
1511         DBGPR("<--xgbe_xmit\n");
1512
1513         return ret;
1514 }
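/* Error paths in xgbe_xmit() consume the skb: it is freed with
 * dev_kfree_skb_any() rather than handed back to the stack. Only
 * xgbe_maybe_stop_tx_queue() returns with the skb untouched (presumably
 * NETDEV_TX_BUSY) so the stack can requeue it. The final
 * xgbe_maybe_stop_tx_queue() call stops the queue early once fewer than
 * XGBE_TX_MAX_DESCS descriptors remain, so the next packet never fails
 * mid-ring.
 */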
1515
1516 static void xgbe_set_rx_mode(struct net_device *netdev)
1517 {
1518         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1519         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1520         unsigned int pr_mode, am_mode;
1521
1522         DBGPR("-->xgbe_set_rx_mode\n");
1523
1524         pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
1525         am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
1526
1527         hw_if->set_promiscuous_mode(pdata, pr_mode);
1528         hw_if->set_all_multicast_mode(pdata, am_mode);
1529
1530         hw_if->add_mac_addresses(pdata);
1531
1532         DBGPR("<--xgbe_set_rx_mode\n");
1533 }
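/* ndo_set_rx_mode is called by the stack with the netdev address-list
 * lock held, so reading netdev->flags and re-syncing the MAC address
 * filters via add_mac_addresses() here is race-free against concurrent
 * address updates.
 */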
1534
1535 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
1536 {
1537         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1538         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1539         struct sockaddr *saddr = addr;
1540
1541         DBGPR("-->xgbe_set_mac_address\n");
1542
1543         if (!is_valid_ether_addr(saddr->sa_data))
1544                 return -EADDRNOTAVAIL;
1545
1546         memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
1547
1548         hw_if->set_mac_address(pdata, netdev->dev_addr);
1549
1550         DBGPR("<--xgbe_set_mac_address\n");
1551
1552         return 0;
1553 }
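/* is_valid_ether_addr() rejects all-zero and multicast addresses;
 * -EADDRNOTAVAIL is the conventional errno so userspace gets a clear
 * failure instead of a bad filter being programmed.
 */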
1554
1555 static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
1556 {
1557         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1558         int ret;
1559
1560         switch (cmd) {
1561         case SIOCGHWTSTAMP:
1562                 ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
1563                 break;
1564
1565         case SIOCSHWTSTAMP:
1566                 ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
1567                 break;
1568
1569         default:
1570                 ret = -EOPNOTSUPP;
1571         }
1572
1573         return ret;
1574 }
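/* Only the hardware timestamping ioctls are supported: SIOCGHWTSTAMP and
 * SIOCSHWTSTAMP pass a struct hwtstamp_config through ifreq. Everything
 * else gets -EOPNOTSUPP so callers can probe for support cleanly.
 */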
1575
1576 static int xgbe_change_mtu(struct net_device *netdev, int mtu)
1577 {
1578         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1579         int ret;
1580
1581         DBGPR("-->xgbe_change_mtu\n");
1582
1583         ret = xgbe_calc_rx_buf_size(netdev, mtu);
1584         if (ret < 0)
1585                 return ret;
1586
1587         pdata->rx_buf_size = ret;
1588         netdev->mtu = mtu;
1589
1590         xgbe_restart_dev(pdata, 0);
1591
1592         DBGPR("<--xgbe_change_mtu\n");
1593
1594         return 0;
1595 }
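/* An MTU change requires a restart: the Rx buffer size computed by
 * xgbe_calc_rx_buf_size() is programmed into the hardware and descriptor
 * rings when the device starts, so xgbe_restart_dev() brings the device
 * down and back up with the new size.
 */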
1596
1597 static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
1598                                                   struct rtnl_link_stats64 *s)
1599 {
1600         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1601         struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
1602
1603         DBGPR("-->%s\n", __func__);
1604
1605         pdata->hw_if.read_mmc_stats(pdata);
1606
1607         s->rx_packets = pstats->rxframecount_gb;
1608         s->rx_bytes = pstats->rxoctetcount_gb;
1609         s->rx_errors = pstats->rxframecount_gb -
1610                        pstats->rxbroadcastframes_g -
1611                        pstats->rxmulticastframes_g -
1612                        pstats->rxunicastframes_g;
1613         s->multicast = pstats->rxmulticastframes_g;
1614         s->rx_length_errors = pstats->rxlengtherror;
1615         s->rx_crc_errors = pstats->rxcrcerror;
1616         s->rx_fifo_errors = pstats->rxfifooverflow;
1617
1618         s->tx_packets = pstats->txframecount_gb;
1619         s->tx_bytes = pstats->txoctetcount_gb;
1620         s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
1621         s->tx_dropped = netdev->stats.tx_dropped;
1622
1623         DBGPR("<--%s\n", __func__);
1624
1625         return s;
1626 }
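/* MMC counter naming: "_gb" counters count good and bad frames, "_g"
 * counters good frames only. rx_errors is thus derived as all received
 * frames minus the good unicast/multicast/broadcast frames, leaving the
 * errored ones; tx_errors follows the same good-and-bad minus good
 * pattern.
 */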
1627
1628 static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1629                                 u16 vid)
1630 {
1631         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1632         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1633
1634         DBGPR("-->%s\n", __func__);
1635
1636         set_bit(vid, pdata->active_vlans);
1637         hw_if->update_vlan_hash_table(pdata);
1638
1639         DBGPR("<--%s\n", __func__);
1640
1641         return 0;
1642 }
1643
1644 static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1645                                  u16 vid)
1646 {
1647         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1648         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1649
1650         DBGPR("-->%s\n", __func__);
1651
1652         clear_bit(vid, pdata->active_vlans);
1653         hw_if->update_vlan_hash_table(pdata);
1654
1655         DBGPR("<--%s\n", __func__);
1656
1657         return 0;
1658 }
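/* Both VLAN hooks update the active_vlans bitmap (one bit per possible
 * VLAN ID) and then regenerate the hardware VLAN hash filter from it, so
 * add and kill are symmetric single-bit operations.
 */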
1659
1660 #ifdef CONFIG_NET_POLL_CONTROLLER
1661 static void xgbe_poll_controller(struct net_device *netdev)
1662 {
1663         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1664         struct xgbe_channel *channel;
1665         unsigned int i;
1666
1667         DBGPR("-->xgbe_poll_controller\n");
1668
1669         if (pdata->per_channel_irq) {
1670                 channel = pdata->channel;
1671                 for (i = 0; i < pdata->channel_count; i++, channel++)
1672                         xgbe_dma_isr(channel->dma_irq, channel);
1673         } else {
1674                 disable_irq(pdata->dev_irq);
1675                 xgbe_isr(pdata->dev_irq, pdata);
1676                 enable_irq(pdata->dev_irq);
1677         }
1678
1679         DBGPR("<--xgbe_poll_controller\n");
1680 }
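/* For netpoll the interrupt path is simulated: the per-channel DMA ISRs
 * are called directly, or the device-level ISR is run with its IRQ masked
 * to avoid re-entrance.
 */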
1681 #endif /* End CONFIG_NET_POLL_CONTROLLER */
1682
1683 static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
1684 {
1685         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1686         unsigned int offset, queue;
1687         u8 i;
1688
1689         if (tc && (tc != pdata->hw_feat.tc_cnt))
1690                 return -EINVAL;
1691
1692         if (tc) {
1693                 netdev_set_num_tc(netdev, tc);
1694                 for (i = 0, queue = 0, offset = 0; i < tc; i++) {
1695                         while ((queue < pdata->tx_q_count) &&
1696                                (pdata->q2tc_map[queue] == i))
1697                                 queue++;
1698
1699                         DBGPR("  TC%u using TXq%u-%u\n", i, offset, queue - 1);
1700                         netdev_set_tc_queue(netdev, i, queue - offset, offset);
1701                         offset = queue;
1702                 }
1703         } else {
1704                 netdev_reset_tc(netdev);
1705         }
1706
1707         return 0;
1708 }
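/* q2tc_map[] is assumed to assign Tx queues to traffic classes
 * contiguously, so the inner while loop just finds where TC i's range
 * ends. For example, with q2tc_map = {0, 0, 1, 1} this yields TC0 ->
 * queues 0-1 (count 2, offset 0) and TC1 -> queues 2-3 (count 2,
 * offset 2).
 */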
1709
1710 static int xgbe_set_features(struct net_device *netdev,
1711                              netdev_features_t features)
1712 {
1713         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1714         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1715         netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
1716         int ret = 0;
1717
1718         rxhash = pdata->netdev_features & NETIF_F_RXHASH;
1719         rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
1720         rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
1721         rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
1722
1723         if ((features & NETIF_F_RXHASH) && !rxhash)
1724                 ret = hw_if->enable_rss(pdata);
1725         else if (!(features & NETIF_F_RXHASH) && rxhash)
1726                 ret = hw_if->disable_rss(pdata);
1727         if (ret)
1728                 return ret;
1729
1730         if ((features & NETIF_F_RXCSUM) && !rxcsum)
1731                 hw_if->enable_rx_csum(pdata);
1732         else if (!(features & NETIF_F_RXCSUM) && rxcsum)
1733                 hw_if->disable_rx_csum(pdata);
1734
1735         if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
1736                 hw_if->enable_rx_vlan_stripping(pdata);
1737         else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
1738                 hw_if->disable_rx_vlan_stripping(pdata);
1739
1740         if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
1741                 hw_if->enable_rx_vlan_filtering(pdata);
1742         else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
1743                 hw_if->disable_rx_vlan_filtering(pdata);
1744
1745         pdata->netdev_features = features;
1746
1747         DBGPR("<--xgbe_set_features\n");
1748
1749         return 0;
1750 }
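/* The requested features are diffed against the cached
 * pdata->netdev_features so each hardware toggle fires only on an actual
 * state change; only the RSS enable/disable hooks can fail, hence the
 * single ret check.
 */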
1751
1752 static const struct net_device_ops xgbe_netdev_ops = {
1753         .ndo_open               = xgbe_open,
1754         .ndo_stop               = xgbe_close,
1755         .ndo_start_xmit         = xgbe_xmit,
1756         .ndo_set_rx_mode        = xgbe_set_rx_mode,
1757         .ndo_set_mac_address    = xgbe_set_mac_address,
1758         .ndo_validate_addr      = eth_validate_addr,
1759         .ndo_do_ioctl           = xgbe_ioctl,
1760         .ndo_change_mtu         = xgbe_change_mtu,
1761         .ndo_get_stats64        = xgbe_get_stats64,
1762         .ndo_vlan_rx_add_vid    = xgbe_vlan_rx_add_vid,
1763         .ndo_vlan_rx_kill_vid   = xgbe_vlan_rx_kill_vid,
1764 #ifdef CONFIG_NET_POLL_CONTROLLER
1765         .ndo_poll_controller    = xgbe_poll_controller,
1766 #endif
1767         .ndo_setup_tc           = xgbe_setup_tc,
1768         .ndo_set_features       = xgbe_set_features,
1769 };
1770
1771 struct net_device_ops *xgbe_get_netdev_ops(void)
1772 {
1773         return (struct net_device_ops *)&xgbe_netdev_ops;
1774 }
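/* The cast drops the const qualifier, presumably so callers that expect a
 * mutable pointer keep working; the ops table itself stays static const
 * and is never modified.
 */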
1775
1776 static void xgbe_rx_refresh(struct xgbe_channel *channel)
1777 {
1778         struct xgbe_prv_data *pdata = channel->pdata;
1779         struct xgbe_desc_if *desc_if = &pdata->desc_if;
1780         struct xgbe_ring *ring = channel->rx_ring;
1781         struct xgbe_ring_data *rdata;
1782
1783         desc_if->realloc_rx_buffer(channel);
1784
1785         /* Update the Rx Tail Pointer Register with address of
1786          * the last cleaned entry */
1787         rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
1788         XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
1789                           lower_32_bits(rdata->rdesc_dma));
1790 }
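/* Writing DMA_CH_RDTR_LO hands the replenished descriptors back to the
 * DMA engine up to the last cleaned entry. Only the low 32 address bits
 * are written here; the upper bits are presumably programmed once when
 * the ring is configured.
 */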
1791
1792 static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
1793                                        struct xgbe_ring_data *rdata,
1794                                        unsigned int *len)
1795 {
1796         struct net_device *netdev = pdata->netdev;
1797         struct sk_buff *skb;
1798         u8 *packet;
1799         unsigned int copy_len;
1800
1801         skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len);
1802         if (!skb)
1803                 return NULL;
1804
1805         packet = page_address(rdata->rx.hdr.pa.pages) +
1806                  rdata->rx.hdr.pa.pages_offset;
1807         copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
1808         copy_len = min(rdata->rx.hdr.dma_len, copy_len);
1809         skb_copy_to_linear_data(skb, packet, copy_len);
1810         skb_put(skb, copy_len);
1811
1812         *len -= copy_len;
1813
1814         return skb;
1815 }
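/* Header/payload split: only the header (or the whole frame when the
 * hardware reports no split, i.e. hdr_len == 0) is copied into the linear
 * skb area, capped at the header buffer size. *len is reduced by the
 * amount copied so the caller knows how much payload remains to attach as
 * a page fragment.
 */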
1816
1817 static int xgbe_tx_poll(struct xgbe_channel *channel)
1818 {
1819         struct xgbe_prv_data *pdata = channel->pdata;
1820         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1821         struct xgbe_desc_if *desc_if = &pdata->desc_if;
1822         struct xgbe_ring *ring = channel->tx_ring;
1823         struct xgbe_ring_data *rdata;
1824         struct xgbe_ring_desc *rdesc;
1825         struct net_device *netdev = pdata->netdev;
1826         struct netdev_queue *txq;
1827         unsigned long flags;
1828         int processed = 0;
1829         unsigned int tx_packets = 0, tx_bytes = 0;
1830
1831         DBGPR("-->xgbe_tx_poll\n");
1832
1833         /* Nothing to do if there isn't a Tx ring for this channel */
1834         if (!ring)
1835                 return 0;
1836
1837         txq = netdev_get_tx_queue(netdev, channel->queue_index);
1838
1839         spin_lock_irqsave(&ring->lock, flags);
1840
1841         while ((processed < XGBE_TX_DESC_MAX_PROC) &&
1842                (ring->dirty != ring->cur)) {
1843                 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
1844                 rdesc = rdata->rdesc;
1845
1846                 if (!hw_if->tx_complete(rdesc))
1847                         break;
1848
1849                 /* Make sure descriptor fields are read after reading the OWN
1850                  * bit */
1851                 rmb();
1852
1853 #ifdef XGMAC_ENABLE_TX_DESC_DUMP
1854                 xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
1855 #endif
1856
1857                 if (hw_if->is_last_desc(rdesc)) {
1858                         tx_packets += rdata->tx.packets;
1859                         tx_bytes += rdata->tx.bytes;
1860                 }
1861
1862                 /* Free the SKB and reset the descriptor for re-use */
1863                 desc_if->unmap_rdata(pdata, rdata);
1864                 hw_if->tx_desc_reset(rdata);
1865
1866                 processed++;
1867                 ring->dirty++;
1868         }
1869
1870         if (!processed)
1871                 goto unlock;
1872
1873         netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
1874
1875         if ((ring->tx.queue_stopped == 1) &&
1876             (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
1877                 ring->tx.queue_stopped = 0;
1878                 netif_tx_wake_queue(txq);
1879         }
1880
1881         DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
1882
1883 unlock:
1884         spin_unlock_irqrestore(&ring->lock, flags);
1885
1886         return processed;
1887 }
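/* netdev_tx_completed_queue() pairs with netdev_tx_sent_queue() in
 * xgbe_xmit() for byte queue limit (BQL) accounting. The queue is rewoken
 * only once more than XGBE_TX_DESC_MIN_FREE descriptors are free, giving
 * hysteresis against stop/wake thrashing.
 */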
1888
1889 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
1890 {
1891         struct xgbe_prv_data *pdata = channel->pdata;
1892         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1893         struct xgbe_ring *ring = channel->rx_ring;
1894         struct xgbe_ring_data *rdata;
1895         struct xgbe_packet_data *packet;
1896         struct net_device *netdev = pdata->netdev;
1897         struct napi_struct *napi;
1898         struct sk_buff *skb;
1899         struct skb_shared_hwtstamps *hwtstamps;
1900         unsigned int incomplete, error, context_next, context;
1901         unsigned int len, put_len, max_len;
1902         unsigned int received = 0;
1903         int packet_count = 0;
1904
1905         DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
1906
1907         /* Nothing to do if there isn't a Rx ring for this channel */
1908         if (!ring)
1909                 return 0;
1910
1911         napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
1912
1913         rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1914         packet = &ring->packet_data;
1915         while (packet_count < budget) {
1916                 DBGPR("  cur = %d\n", ring->cur);
1917
1918                 /* First time through the loop, see if saved state must be restored */
1919                 if (!received && rdata->state_saved) {
1920                         incomplete = rdata->state.incomplete;
1921                         context_next = rdata->state.context_next;
1922                         skb = rdata->state.skb;
1923                         error = rdata->state.error;
1924                         len = rdata->state.len;
1925                 } else {
1926                         memset(packet, 0, sizeof(*packet));
1927                         incomplete = 0;
1928                         context_next = 0;
1929                         skb = NULL;
1930                         error = 0;
1931                         len = 0;
1932                 }
1933
1934 read_again:
1935                 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1936
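                /* Replenish Rx buffers in batches: once an eighth of the
                 * ring (XGBE_RX_DESC_CNT >> 3) is dirty, reallocate and
                 * write the tail pointer in one go.
                 */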
1937                 if (ring->dirty > (XGBE_RX_DESC_CNT >> 3))
1938                         xgbe_rx_refresh(channel);
1939
1940                 if (hw_if->dev_read(channel))
1941                         break;
1942
1943                 received++;
1944                 ring->cur++;
1945                 ring->dirty++;
1946
1947                 incomplete = XGMAC_GET_BITS(packet->attributes,
1948                                             RX_PACKET_ATTRIBUTES,
1949                                             INCOMPLETE);
1950                 context_next = XGMAC_GET_BITS(packet->attributes,
1951                                               RX_PACKET_ATTRIBUTES,
1952                                               CONTEXT_NEXT);
1953                 context = XGMAC_GET_BITS(packet->attributes,
1954                                          RX_PACKET_ATTRIBUTES,
1955                                          CONTEXT);
1956
1957                 /* Earlier error, just drain the remaining data */
1958                 if ((incomplete || context_next) && error)
1959                         goto read_again;
1960
1961                 if (error || packet->errors) {
1962                         if (packet->errors)
1963                                 DBGPR("Error in received packet\n");
1964                         dev_kfree_skb(skb);
1965                         goto next_packet;
1966                 }
1967
1968                 if (!context) {
1969                         put_len = rdata->rx.len - len;
1970                         len += put_len;
1971
1972                         if (!skb) {
1973                                 dma_sync_single_for_cpu(pdata->dev,
1974                                                         rdata->rx.hdr.dma,
1975                                                         rdata->rx.hdr.dma_len,
1976                                                         DMA_FROM_DEVICE);
1977
1978                                 skb = xgbe_create_skb(pdata, rdata, &put_len);
1979                                 if (!skb) {
1980                                         error = 1;
1981                                         goto skip_data;
1982                                 }
1983                         }
1984
1985                         if (put_len) {
1986                                 dma_sync_single_for_cpu(pdata->dev,
1987                                                         rdata->rx.buf.dma,
1988                                                         rdata->rx.buf.dma_len,
1989                                                         DMA_FROM_DEVICE);
1990
1991                                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1992                                                 rdata->rx.buf.pa.pages,
1993                                                 rdata->rx.buf.pa.pages_offset,
1994                                                 put_len, rdata->rx.buf.dma_len);
1995                                 rdata->rx.buf.pa.pages = NULL;
1996                         }
1997                 }
1998
1999 skip_data:
2000                 if (incomplete || context_next)
2001                         goto read_again;
2002
2003                 if (!skb)
2004                         goto next_packet;
2005
2006                 /* Be sure we don't exceed the configured MTU */
2007                 max_len = netdev->mtu + ETH_HLEN;
2008                 if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
2009                     (skb->protocol == htons(ETH_P_8021Q)))
2010                         max_len += VLAN_HLEN;
2011
2012                 if (skb->len > max_len) {
2013                         DBGPR("packet length exceeds configured MTU\n");
2014                         dev_kfree_skb(skb);
2015                         goto next_packet;
2016                 }
2017
2018 #ifdef XGMAC_ENABLE_RX_PKT_DUMP
2019                 xgbe_print_pkt(netdev, skb, false);
2020 #endif
2021
2022                 skb_checksum_none_assert(skb);
2023                 if (XGMAC_GET_BITS(packet->attributes,
2024                                    RX_PACKET_ATTRIBUTES, CSUM_DONE))
2025                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2026
2027                 if (XGMAC_GET_BITS(packet->attributes,
2028                                    RX_PACKET_ATTRIBUTES, VLAN_CTAG))
2029                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2030                                                packet->vlan_ctag);
2031
2032                 if (XGMAC_GET_BITS(packet->attributes,
2033                                    RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
2034                         u64 nsec;
2035
2036                         nsec = timecounter_cyc2time(&pdata->tstamp_tc,
2037                                                     packet->rx_tstamp);
2038                         hwtstamps = skb_hwtstamps(skb);
2039                         hwtstamps->hwtstamp = ns_to_ktime(nsec);
2040                 }
2041
2042                 if (XGMAC_GET_BITS(packet->attributes,
2043                                    RX_PACKET_ATTRIBUTES, RSS_HASH))
2044                         skb_set_hash(skb, packet->rss_hash,
2045                                      packet->rss_hash_type);
2046
2047                 skb->dev = netdev;
2048                 skb->protocol = eth_type_trans(skb, netdev);
2049                 skb_record_rx_queue(skb, channel->queue_index);
2050                 skb_mark_napi_id(skb, napi);
2051
2052                 netdev->last_rx = jiffies;
2053                 napi_gro_receive(napi, skb);
2054
2055 next_packet:
2056                 packet_count++;
2057         }
2058
2059         /* Check if we need to save state before leaving */
2060         if (received && (incomplete || context_next)) {
2061                 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
2062                 rdata->state_saved = 1;
2063                 rdata->state.incomplete = incomplete;
2064                 rdata->state.context_next = context_next;
2065                 rdata->state.skb = skb;
2066                 rdata->state.len = len;
2067                 rdata->state.error = error;
2068         }
2069
2070         DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
2071
2072         return packet_count;
2073 }
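/* A frame can span NAPI polls (budget exhausted, or its descriptors still
 * incomplete), so the partial skb and parsing flags are stashed in the
 * ring data above and restored at the top of the next poll before any new
 * descriptors are read.
 */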
2074
2075 static int xgbe_one_poll(struct napi_struct *napi, int budget)
2076 {
2077         struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
2078                                                     napi);
2079         int processed = 0;
2080
2081         DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
2082
2083         /* Cleanup Tx ring first */
2084         xgbe_tx_poll(channel);
2085
2086         /* Process Rx ring next */
2087         processed = xgbe_rx_poll(channel, budget);
2088
2089         /* If we processed everything, we are done */
2090         if (processed < budget) {
2091                 /* Turn off polling */
2092                 napi_complete(napi);
2093
2094                 /* Enable Tx and Rx interrupts */
2095                 enable_irq(channel->dma_irq);
2096         }
2097
2098         DBGPR("<--xgbe_one_poll: received = %d\n", processed);
2099
2100         return processed;
2101 }
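/* The per-channel DMA interrupt, presumably disabled in xgbe_dma_isr()
 * when this NAPI instance was scheduled, is re-enabled only after
 * napi_complete() has taken the instance off the poll list.
 */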
2102
2103 static int xgbe_all_poll(struct napi_struct *napi, int budget)
2104 {
2105         struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
2106                                                    napi);
2107         struct xgbe_channel *channel;
2108         int ring_budget;
2109         int processed, last_processed;
2110         unsigned int i;
2111
2112         DBGPR("-->xgbe_all_poll: budget=%d\n", budget);
2113
2114         processed = 0;
2115         ring_budget = budget / pdata->rx_ring_count;
2116         do {
2117                 last_processed = processed;
2118
2119                 channel = pdata->channel;
2120                 for (i = 0; i < pdata->channel_count; i++, channel++) {
2121                         /* Cleanup Tx ring first */
2122                         xgbe_tx_poll(channel);
2123
2124                         /* Process Rx ring next */
2125                         if (ring_budget > (budget - processed))
2126                                 ring_budget = budget - processed;
2127                         processed += xgbe_rx_poll(channel, ring_budget);
2128                 }
2129         } while ((processed < budget) && (processed != last_processed));
2130
2131         /* If we processed everything, we are done */
2132         if (processed < budget) {
2133                 /* Turn off polling */
2134                 napi_complete(napi);
2135
2136                 /* Enable Tx and Rx interrupts */
2137                 xgbe_enable_rx_tx_ints(pdata);
2138         }
2139
2140         DBGPR("<--xgbe_all_poll: received = %d\n", processed);
2141
2142         return processed;
2143 }
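/* Budget handling: each Rx ring initially gets budget / rx_ring_count
 * (e.g. 64 / 4 = 16), and the outer do/while keeps cycling the channels
 * while progress is being made, so budget left over from the integer
 * division or from idle rings is consumed by busy ones, clamped so the
 * total never exceeds the NAPI budget.
 */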
2144
2145 void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
2146                        unsigned int count, unsigned int flag)
2147 {
2148         struct xgbe_ring_data *rdata;
2149         struct xgbe_ring_desc *rdesc;
2150
2151         while (count--) {
2152                 rdata = XGBE_GET_DESC_DATA(ring, idx);
2153                 rdesc = rdata->rdesc;
2154                 pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
2155                          (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
2156                          le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
2157                          le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
2158                 idx++;
2159         }
2160 }
2161
2162 void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
2163                        unsigned int idx)
2164 {
2165         pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
2166                  le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
2167                  le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
2168 }
2169
2170 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
2171 {
2172         struct ethhdr *eth = (struct ethhdr *)skb->data;
2173         unsigned char *buf = skb->data;
2174         unsigned char buffer[128];
2175         unsigned int i, j;
2176
2177         netdev_alert(netdev, "\n************** SKB dump ****************\n");
2178
2179         netdev_alert(netdev, "%s packet of %d bytes\n",
2180                      (tx_rx ? "TX" : "RX"), skb->len);
2181
2182         netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
2183         netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
2184         netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
2185
2186         for (i = 0, j = 0; i < skb->len;) {
2187                 j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
2188                               buf[i++]);
2189
2190                 if ((i % 32) == 0) {
2191                         netdev_alert(netdev, "  0x%04x: %s\n", i - 32, buffer);
2192                         j = 0;
2193                 } else if ((i % 16) == 0) {
2194                         buffer[j++] = ' ';
2195                         buffer[j++] = ' ';
2196                 } else if ((i % 4) == 0) {
2197                         buffer[j++] = ' ';
2198                 }
2199         }
2200         if (i % 32)
2201                 netdev_alert(netdev, "  0x%04x: %s\n", i - (i % 32), buffer);
2202
2203         netdev_alert(netdev, "\n************** SKB dump ****************\n");
2204 }
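/* The hex dump above prints the payload as four-byte groups separated by
 * a single space, with a double space every 16 bytes and a new output
 * line every 32, e.g.:
 *   0x0000: xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx  xxxxxxxx ...
 * A trailing partial line is flushed after the loop.
 */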