drivers/net/ethernet/apm/xgene/xgene_enet_main.c
1 /* Applied Micro X-Gene SoC Ethernet Driver
2  *
3  * Copyright (c) 2014, Applied Micro Circuits Corporation
4  * Authors: Iyappan Subramanian <isubramanian@apm.com>
5  *          Ravi Patel <rapatel@apm.com>
6  *          Keyur Chudgar <kchudgar@apm.com>
7  *
8  * This program is free software; you can redistribute  it and/or modify it
9  * under  the terms of  the GNU General  Public License as published by the
10  * Free Software Foundation;  either version 2 of the  License, or (at your
11  * option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include "xgene_enet_main.h"
23 #include "xgene_enet_hw.h"
24 #include "xgene_enet_sgmac.h"
25 #include "xgene_enet_xgmac.h"
26
27 #define RES_ENET_CSR    0
28 #define RES_RING_CSR    1
29 #define RES_RING_CMD    2
30
31 static const struct of_device_id xgene_enet_of_match[];
32 static const struct acpi_device_id xgene_enet_acpi_match[];
33
34 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
35 {
36         struct xgene_enet_raw_desc16 *raw_desc;
37         int i;
38
39         for (i = 0; i < buf_pool->slots; i++) {
40                 raw_desc = &buf_pool->raw_desc16[i];
41
42                 /* Hardware expects descriptor in little endian format */
43                 raw_desc->m0 = cpu_to_le64(i |
44                                 SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
45                                 SET_VAL(STASH, 3));
46         }
47 }
48
49 static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
50                                      u32 nbuf)
51 {
52         struct sk_buff *skb;
53         struct xgene_enet_raw_desc16 *raw_desc;
54         struct xgene_enet_pdata *pdata;
55         struct net_device *ndev;
56         struct device *dev;
57         dma_addr_t dma_addr;
58         u32 tail = buf_pool->tail;
59         u32 slots = buf_pool->slots - 1;
60         u16 bufdatalen, len;
61         int i;
62
63         ndev = buf_pool->ndev;
64         dev = ndev_to_dev(buf_pool->ndev);
65         pdata = netdev_priv(ndev);
66         bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
67         len = XGENE_ENET_MAX_MTU;
68
69         for (i = 0; i < nbuf; i++) {
70                 raw_desc = &buf_pool->raw_desc16[tail];
71
72                 skb = netdev_alloc_skb_ip_align(ndev, len);
73                 if (unlikely(!skb))
74                         return -ENOMEM;
75                 buf_pool->rx_skb[tail] = skb;
76
77                 dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
78                 if (dma_mapping_error(dev, dma_addr)) {
79                         netdev_err(ndev, "DMA mapping error\n");
80                         dev_kfree_skb_any(skb);
81                         return -EINVAL;
82                 }
83
84                 raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
85                                            SET_VAL(BUFDATALEN, bufdatalen) |
86                                            SET_BIT(COHERENT));
87                 tail = (tail + 1) & slots;
88         }
89
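           /* Tell the hardware about the newly filled buffers via the ring command register */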
90         pdata->ring_ops->wr_cmd(buf_pool, nbuf);
91         buf_pool->tail = tail;
92
93         return 0;
94 }
95
96 static u8 xgene_enet_hdr_len(const void *data)
97 {
98         const struct ethhdr *eth = data;
99
100         return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
101 }
102
103 static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
104 {
105         int i;
106
107         /* Free up the buffers held by hardware */
108         for (i = 0; i < buf_pool->slots; i++) {
109                 if (buf_pool->rx_skb[i])
110                         dev_kfree_skb_any(buf_pool->rx_skb[i]);
111         }
112 }
113
114 static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
115 {
116         struct xgene_enet_desc_ring *rx_ring = data;
117
118         if (napi_schedule_prep(&rx_ring->napi)) {
119                 disable_irq_nosync(irq);
120                 __napi_schedule(&rx_ring->napi);
121         }
122
123         return IRQ_HANDLED;
124 }
125
126 static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
127                                     struct xgene_enet_raw_desc *raw_desc)
128 {
129         struct sk_buff *skb;
130         struct device *dev;
131         skb_frag_t *frag;
132         dma_addr_t *frag_dma_addr;
133         u16 skb_index;
134         u8 status;
135         int i, ret = 0;
136
137         skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
138         skb = cp_ring->cp_skb[skb_index];
139         frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
140
141         dev = ndev_to_dev(cp_ring->ndev);
142         dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
143                          skb_headlen(skb),
144                          DMA_TO_DEVICE);
145
146         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
147                 frag = &skb_shinfo(skb)->frags[i];
148                 dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
149                                DMA_TO_DEVICE);
150         }
151
152         /* Check for transmit errors; only LERR codes above 2 are treated as failures */
153         status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
154         if (unlikely(status > 2)) {
155                 xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
156                                        status);
157                 ret = -EIO;
158         }
159
160         if (likely(skb)) {
161                 dev_kfree_skb_any(skb);
162         } else {
163                 netdev_err(cp_ring->ndev, "completion skb is NULL\n");
164                 ret = -EIO;
165         }
166
167         return ret;
168 }
169
170 static u64 xgene_enet_work_msg(struct sk_buff *skb)
171 {
172         struct net_device *ndev = skb->dev;
173         struct iphdr *iph;
174         u8 l3hlen = 0, l4hlen = 0;
175         u8 ethhdr, proto = 0, csum_enable = 0;
176         u64 hopinfo = 0;
177         u32 hdr_len, mss = 0;
178         u32 i, len, nr_frags;
179
180         ethhdr = xgene_enet_hdr_len(skb->data);
181
182         if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
183             unlikely(skb->protocol != htons(ETH_P_8021Q)))
184                 goto out;
185
186         if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
187                 goto out;
188
189         iph = ip_hdr(skb);
190         if (unlikely(ip_is_fragment(iph)))
191                 goto out;
192
193         if (likely(iph->protocol == IPPROTO_TCP)) {
194                 l4hlen = tcp_hdrlen(skb) >> 2;
195                 csum_enable = 1;
196                 proto = TSO_IPPROTO_TCP;
197                 if (ndev->features & NETIF_F_TSO) {
198                         hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
199                         mss = skb_shinfo(skb)->gso_size;
200
201                         if (skb_is_nonlinear(skb)) {
202                                 len = skb_headlen(skb);
203                                 nr_frags = skb_shinfo(skb)->nr_frags;
204
205                                 for (i = 0; i < 2 && i < nr_frags; i++)
206                                         len += skb_shinfo(skb)->frags[i].size;
207
208                                 /* HW requires the headers to fit within the first 3 buffers */
209                                 if (unlikely(hdr_len > len)) {
210                                         if (skb_linearize(skb))
211                                                 return 0;
212                                 }
213                         }
214
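                            /* No TSO work needed when there is no MSS or the payload fits in a single segment */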
215                         if (!mss || ((skb->len - hdr_len) <= mss))
216                                 goto out;
217
218                         hopinfo |= SET_BIT(ET);
219                 }
220         } else if (iph->protocol == IPPROTO_UDP) {
221                 l4hlen = UDP_HDR_SIZE;
222                 csum_enable = 1;
223         }
224 out:
225         l3hlen = ip_hdrlen(skb) >> 2;
226         hopinfo |= SET_VAL(TCPHDR, l4hlen) |
227                   SET_VAL(IPHDR, l3hlen) |
228                   SET_VAL(ETHHDR, ethhdr) |
229                   SET_VAL(EC, csum_enable) |
230                   SET_VAL(IS, proto) |
231                   SET_BIT(IC) |
232                   SET_BIT(TYPE_ETH_WORK_MESSAGE);
233
234         return hopinfo;
235 }
236
237 static u16 xgene_enet_encode_len(u16 len)
238 {
239         return (len == BUFLEN_16K) ? 0 : len;
240 }
241
242 static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
243 {
244         desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
245                                     SET_VAL(BUFDATALEN, len));
246 }
247
248 static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
249 {
250         __le64 *exp_bufs;
251
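           /* Hand out blocks of MAX_EXP_BUFFS entries round-robin; the pool holds ring->slots / 2 blocks */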
252         exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
253         memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
254         ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
255
256         return exp_bufs;
257 }
258
259 static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
260 {
261         return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
262 }
263
264 static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
265                                     struct sk_buff *skb)
266 {
267         struct device *dev = ndev_to_dev(tx_ring->ndev);
268         struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
269         struct xgene_enet_raw_desc *raw_desc;
270         __le64 *exp_desc = NULL, *exp_bufs = NULL;
271         dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
272         skb_frag_t *frag;
273         u16 tail = tx_ring->tail;
274         u64 hopinfo;
275         u32 len, hw_len;
276         u8 ll = 0, nv = 0, idx = 0;
277         bool split = false;
278         u32 size, offset, ell_bytes = 0;
279         u32 i, fidx, nr_frags, count = 1;
280
281         raw_desc = &tx_ring->raw_desc[tail];
282         tail = (tail + 1) & (tx_ring->slots - 1);
283         memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
284
285         hopinfo = xgene_enet_work_msg(skb);
286         if (!hopinfo)
287                 return -EINVAL;
288         raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
289                                    hopinfo);
290
291         len = skb_headlen(skb);
292         hw_len = xgene_enet_encode_len(len);
293
294         dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
295         if (dma_mapping_error(dev, dma_addr)) {
296                 netdev_err(tx_ring->ndev, "DMA mapping error\n");
297                 return -EINVAL;
298         }
299
300         /* Hardware expects descriptor in little endian format */
301         raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
302                                    SET_VAL(BUFDATALEN, hw_len) |
303                                    SET_BIT(COHERENT));
304
305         if (!skb_is_nonlinear(skb))
306                 goto out;
307
308         /* scatter gather */
309         nv = 1;
310         exp_desc = (void *)&tx_ring->raw_desc[tail];
311         tail = (tail + 1) & (tx_ring->slots - 1);
312         memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
313
314         nr_frags = skb_shinfo(skb)->nr_frags;
315         for (i = nr_frags; i < 4; i++)
316                 exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
317
318         frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
319
320         for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
321                 if (!split) {
322                         frag = &skb_shinfo(skb)->frags[fidx];
323                         size = skb_frag_size(frag);
324                         offset = 0;
325
326                         pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
327                                                      DMA_TO_DEVICE);
328                         if (dma_mapping_error(dev, pbuf_addr))
329                                 return -EINVAL;
330
331                         frag_dma_addr[fidx] = pbuf_addr;
332                         fidx++;
333
334                         if (size > BUFLEN_16K)
335                                 split = true;
336                 }
337
338                 if (size > BUFLEN_16K) {
339                         len = BUFLEN_16K;
340                         size -= BUFLEN_16K;
341                 } else {
342                         len = size;
343                         split = false;
344                 }
345
346                 dma_addr = pbuf_addr + offset;
347                 hw_len = xgene_enet_encode_len(len);
348
349                 switch (i) {
350                 case 0:
351                 case 1:
352                 case 2:
353                         xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
354                         break;
355                 case 3:
356                         if (split || (fidx != nr_frags)) {
357                                 exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
358                                 xgene_set_addr_len(exp_bufs, idx, dma_addr,
359                                                    hw_len);
360                                 idx++;
361                                 ell_bytes += len;
362                         } else {
363                                 xgene_set_addr_len(exp_desc, i, dma_addr,
364                                                    hw_len);
365                         }
366                         break;
367                 default:
368                         xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
369                         idx++;
370                         ell_bytes += len;
371                         break;
372                 }
373
374                 if (split)
375                         offset += BUFLEN_16K;
376         }
377         count++;
378
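           /* Extra fragments were spilled into the external buffer list; map it and link it from exp_desc[2] */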
379         if (idx) {
380                 ll = 1;
381                 dma_addr = dma_map_single(dev, exp_bufs,
382                                           sizeof(u64) * MAX_EXP_BUFFS,
383                                           DMA_TO_DEVICE);
384                 if (dma_mapping_error(dev, dma_addr)) {
385                         dev_kfree_skb_any(skb);
386                         return -EINVAL;
387                 }
388                 i = ell_bytes >> LL_BYTES_LSB_LEN;
389                 exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
390                                           SET_VAL(LL_BYTES_MSB, i) |
391                                           SET_VAL(LL_LEN, idx));
392                 raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
393         }
394
395 out:
396         raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
397                                    SET_VAL(USERINFO, tx_ring->tail));
398         tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
399         pdata->tx_level[tx_ring->cp_ring->index] += count;
400         tx_ring->tail = tail;
401
402         return count;
403 }
404
405 static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
406                                          struct net_device *ndev)
407 {
408         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
409         struct xgene_enet_desc_ring *tx_ring;
410         int index = skb->queue_mapping;
411         u32 tx_level = pdata->tx_level[index];
412         int count;
413
414         tx_ring = pdata->tx_ring[index];
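           /* tx_level and txc_level are free-running counters; compensate for wraparound before computing the backlog */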
415         if (tx_level < pdata->txc_level[index])
416                 tx_level += ((typeof(pdata->tx_level[index]))~0U);
417
418         if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
419                 netif_stop_subqueue(ndev, index);
420                 return NETDEV_TX_BUSY;
421         }
422
423         if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
424                 return NETDEV_TX_OK;
425
426         count = xgene_enet_setup_tx_desc(tx_ring, skb);
427         if (count <= 0) {
428                 dev_kfree_skb_any(skb);
429                 return NETDEV_TX_OK;
430         }
431
432         skb_tx_timestamp(skb);
433
434         tx_ring->tx_packets++;
435         tx_ring->tx_bytes += skb->len;
436
437         pdata->ring_ops->wr_cmd(tx_ring, count);
438         return NETDEV_TX_OK;
439 }
440
441 static void xgene_enet_skip_csum(struct sk_buff *skb)
442 {
443         struct iphdr *iph = ip_hdr(skb);
444
445         if (!ip_is_fragment(iph) ||
446             (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
447                 skb->ip_summed = CHECKSUM_UNNECESSARY;
448         }
449 }
450
451 static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
452                                struct xgene_enet_raw_desc *raw_desc)
453 {
454         struct net_device *ndev;
455         struct xgene_enet_pdata *pdata;
456         struct device *dev;
457         struct xgene_enet_desc_ring *buf_pool;
458         u32 datalen, skb_index;
459         struct sk_buff *skb;
460         u8 status;
461         int ret = 0;
462
463         ndev = rx_ring->ndev;
464         pdata = netdev_priv(ndev);
465         dev = ndev_to_dev(rx_ring->ndev);
466         buf_pool = rx_ring->buf_pool;
467
468         dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
469                          XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
470         skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
471         skb = buf_pool->rx_skb[skb_index];
472         buf_pool->rx_skb[skb_index] = NULL;
473
474         /* Check for receive errors; ELERR and LERR combine into a single error code */
475         status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
476                   GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
477         if (unlikely(status > 2)) {
478                 dev_kfree_skb_any(skb);
479                 xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
480                                        status);
481                 ret = -EIO;
482                 goto out;
483         }
484
485         /* Strip off the CRC; the hardware does not remove it */
486         datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
487         datalen = (datalen & DATALEN_MASK) - 4;
488         prefetch(skb->data - NET_IP_ALIGN);
489         skb_put(skb, datalen);
490
491         skb_checksum_none_assert(skb);
492         skb->protocol = eth_type_trans(skb, ndev);
493         if (likely((ndev->features & NETIF_F_IP_CSUM) &&
494                    skb->protocol == htons(ETH_P_IP))) {
495                 xgene_enet_skip_csum(skb);
496         }
497
498         rx_ring->rx_packets++;
499         rx_ring->rx_bytes += datalen;
500         napi_gro_receive(&rx_ring->napi, skb);
501 out:
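           /* Refill the buffer pool after every NUM_BUFPOOL received buffers */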
502         if (--rx_ring->nbufpool == 0) {
503                 ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
504                 rx_ring->nbufpool = NUM_BUFPOOL;
505         }
506
507         return ret;
508 }
509
510 static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
511 {
512         return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
513 }
514
515 static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
516                                    int budget)
517 {
518         struct net_device *ndev = ring->ndev;
519         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
520         struct xgene_enet_raw_desc *raw_desc, *exp_desc;
521         u16 head = ring->head;
522         u16 slots = ring->slots - 1;
523         int ret, desc_count, count = 0, processed = 0;
524         bool is_completion;
525
526         do {
527                 raw_desc = &ring->raw_desc[head];
528                 desc_count = 0;
529                 is_completion = false;
530                 exp_desc = NULL;
531                 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
532                         break;
533
534                 /* read fpqnum field after dataaddr field */
535                 dma_rmb();
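                   /* NV indicates the message continues into an extended descriptor in the next slot */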
536                 if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
537                         head = (head + 1) & slots;
538                         exp_desc = &ring->raw_desc[head];
539
540                         if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
541                                 head = (head - 1) & slots;
542                                 break;
543                         }
544                         dma_rmb();
545                         count++;
546                         desc_count++;
547                 }
548                 if (is_rx_desc(raw_desc)) {
549                         ret = xgene_enet_rx_frame(ring, raw_desc);
550                 } else {
551                         ret = xgene_enet_tx_completion(ring, raw_desc);
552                         is_completion = true;
553                 }
554                 xgene_enet_mark_desc_slot_empty(raw_desc);
555                 if (exp_desc)
556                         xgene_enet_mark_desc_slot_empty(exp_desc);
557
558                 head = (head + 1) & slots;
559                 count++;
560                 desc_count++;
561                 processed++;
562                 if (is_completion)
563                         pdata->txc_level[ring->index] += desc_count;
564
565                 if (ret)
566                         break;
567         } while (--budget);
568
569         if (likely(count)) {
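                   /* Writing a negative count releases the processed slots back to the ring */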
570                 pdata->ring_ops->wr_cmd(ring, -count);
571                 ring->head = head;
572
573                 if (__netif_subqueue_stopped(ndev, ring->index))
574                         netif_start_subqueue(ndev, ring->index);
575         }
576
577         return processed;
578 }
579
580 static int xgene_enet_napi(struct napi_struct *napi, const int budget)
581 {
582         struct xgene_enet_desc_ring *ring;
583         int processed;
584
585         ring = container_of(napi, struct xgene_enet_desc_ring, napi);
586         processed = xgene_enet_process_ring(ring, budget);
587
588         if (processed != budget) {
589                 napi_complete(napi);
590                 enable_irq(ring->irq);
591         }
592
593         return processed;
594 }
595
596 static void xgene_enet_timeout(struct net_device *ndev)
597 {
598         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
599         struct netdev_queue *txq;
600         int i;
601
602         pdata->mac_ops->reset(pdata);
603
604         for (i = 0; i < pdata->txq_cnt; i++) {
605                 txq = netdev_get_tx_queue(ndev, i);
606                 txq->trans_start = jiffies;
607                 netif_tx_start_queue(txq);
608         }
609 }
610
611 static void xgene_enet_set_irq_name(struct net_device *ndev)
612 {
613         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
614         struct xgene_enet_desc_ring *ring;
615         int i;
616
617         for (i = 0; i < pdata->rxq_cnt; i++) {
618                 ring = pdata->rx_ring[i];
619                 if (!pdata->cq_cnt) {
620                         snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
621                                  ndev->name);
622                 } else {
623                         snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
624                                  ndev->name, i);
625                 }
626         }
627
628         for (i = 0; i < pdata->cq_cnt; i++) {
629                 ring = pdata->tx_ring[i]->cp_ring;
630                 snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
631                          ndev->name, i);
632         }
633 }
634
635 static int xgene_enet_register_irq(struct net_device *ndev)
636 {
637         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
638         struct device *dev = ndev_to_dev(ndev);
639         struct xgene_enet_desc_ring *ring;
640         int ret = 0, i;
641
642         xgene_enet_set_irq_name(ndev);
643         for (i = 0; i < pdata->rxq_cnt; i++) {
644                 ring = pdata->rx_ring[i];
645                 irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
646                 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
647                                        0, ring->irq_name, ring);
648                 if (ret) {
649                         netdev_err(ndev, "Failed to request irq %s\n",
650                                    ring->irq_name);
651                 }
652         }
653
654         for (i = 0; i < pdata->cq_cnt; i++) {
655                 ring = pdata->tx_ring[i]->cp_ring;
656                 irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
657                 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
658                                        0, ring->irq_name, ring);
659                 if (ret) {
660                         netdev_err(ndev, "Failed to request irq %s\n",
661                                    ring->irq_name);
662                 }
663         }
664
665         return ret;
666 }
667
668 static void xgene_enet_free_irq(struct net_device *ndev)
669 {
670         struct xgene_enet_pdata *pdata;
671         struct xgene_enet_desc_ring *ring;
672         struct device *dev;
673         int i;
674
675         pdata = netdev_priv(ndev);
676         dev = ndev_to_dev(ndev);
677
678         for (i = 0; i < pdata->rxq_cnt; i++) {
679                 ring = pdata->rx_ring[i];
680                 irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
681                 devm_free_irq(dev, ring->irq, ring);
682         }
683
684         for (i = 0; i < pdata->cq_cnt; i++) {
685                 ring = pdata->tx_ring[i]->cp_ring;
686                 irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
687                 devm_free_irq(dev, ring->irq, ring);
688         }
689 }
690
691 static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
692 {
693         struct napi_struct *napi;
694         int i;
695
696         for (i = 0; i < pdata->rxq_cnt; i++) {
697                 napi = &pdata->rx_ring[i]->napi;
698                 napi_enable(napi);
699         }
700
701         for (i = 0; i < pdata->cq_cnt; i++) {
702                 napi = &pdata->tx_ring[i]->cp_ring->napi;
703                 napi_enable(napi);
704         }
705 }
706
707 static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
708 {
709         struct napi_struct *napi;
710         int i;
711
712         for (i = 0; i < pdata->rxq_cnt; i++) {
713                 napi = &pdata->rx_ring[i]->napi;
714                 napi_disable(napi);
715         }
716
717         for (i = 0; i < pdata->cq_cnt; i++) {
718                 napi = &pdata->tx_ring[i]->cp_ring->napi;
719                 napi_disable(napi);
720         }
721 }
722
723 static int xgene_enet_open(struct net_device *ndev)
724 {
725         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
726         const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
727         int ret;
728
729         ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
730         if (ret)
731                 return ret;
732
733         ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
734         if (ret)
735                 return ret;
736
737         xgene_enet_napi_enable(pdata);
738         ret = xgene_enet_register_irq(ndev);
739         if (ret)
740                 return ret;
741
742         if (pdata->phy_dev) {
743                 phy_start(pdata->phy_dev);
744         } else {
745                 schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
746                 netif_carrier_off(ndev);
747         }
748
749         mac_ops->tx_enable(pdata);
750         mac_ops->rx_enable(pdata);
751         netif_tx_start_all_queues(ndev);
752
753         return ret;
754 }
755
756 static int xgene_enet_close(struct net_device *ndev)
757 {
758         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
759         const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
760         int i;
761
762         netif_tx_stop_all_queues(ndev);
763         mac_ops->tx_disable(pdata);
764         mac_ops->rx_disable(pdata);
765
766         if (pdata->phy_dev)
767                 phy_stop(pdata->phy_dev);
768         else
769                 cancel_delayed_work_sync(&pdata->link_work);
770
771         xgene_enet_free_irq(ndev);
772         xgene_enet_napi_disable(pdata);
773         for (i = 0; i < pdata->rxq_cnt; i++)
774                 xgene_enet_process_ring(pdata->rx_ring[i], -1);
775
776         return 0;
777 }
778 static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
779 {
780         struct xgene_enet_pdata *pdata;
781         struct device *dev;
782
783         pdata = netdev_priv(ring->ndev);
784         dev = ndev_to_dev(ring->ndev);
785
786         pdata->ring_ops->clear(ring);
787         dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
788 }
789
790 static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
791 {
792         struct xgene_enet_desc_ring *buf_pool;
793         struct xgene_enet_desc_ring *ring;
794         int i;
795
796         for (i = 0; i < pdata->txq_cnt; i++) {
797                 ring = pdata->tx_ring[i];
798                 if (ring) {
799                         xgene_enet_delete_ring(ring);
800                         pdata->port_ops->clear(pdata, ring);
801                         if (pdata->cq_cnt)
802                                 xgene_enet_delete_ring(ring->cp_ring);
803                         pdata->tx_ring[i] = NULL;
804                 }
805         }
806
807         for (i = 0; i < pdata->rxq_cnt; i++) {
808                 ring = pdata->rx_ring[i];
809                 if (ring) {
810                         buf_pool = ring->buf_pool;
811                         xgene_enet_delete_bufpool(buf_pool);
812                         xgene_enet_delete_ring(buf_pool);
813                         pdata->port_ops->clear(pdata, buf_pool);
814                         xgene_enet_delete_ring(ring);
815                         pdata->rx_ring[i] = NULL;
816                 }
817         }
818 }
819
820 static int xgene_enet_get_ring_size(struct device *dev,
821                                     enum xgene_enet_ring_cfgsize cfgsize)
822 {
823         int size = -EINVAL;
824
825         switch (cfgsize) {
826         case RING_CFGSIZE_512B:
827                 size = 0x200;
828                 break;
829         case RING_CFGSIZE_2KB:
830                 size = 0x800;
831                 break;
832         case RING_CFGSIZE_16KB:
833                 size = 0x4000;
834                 break;
835         case RING_CFGSIZE_64KB:
836                 size = 0x10000;
837                 break;
838         case RING_CFGSIZE_512KB:
839                 size = 0x80000;
840                 break;
841         default:
842                 dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
843                 break;
844         }
845
846         return size;
847 }
848
849 static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
850 {
851         struct xgene_enet_pdata *pdata;
852         struct device *dev;
853
854         if (!ring)
855                 return;
856
857         dev = ndev_to_dev(ring->ndev);
858         pdata = netdev_priv(ring->ndev);
859
860         if (ring->desc_addr) {
861                 pdata->ring_ops->clear(ring);
862                 dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
863         }
864         devm_kfree(dev, ring);
865 }
866
867 static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
868 {
869         struct device *dev = &pdata->pdev->dev;
870         struct xgene_enet_desc_ring *ring;
871         int i;
872
873         for (i = 0; i < pdata->txq_cnt; i++) {
874                 ring = pdata->tx_ring[i];
875                 if (ring) {
876                         if (ring->cp_ring && ring->cp_ring->cp_skb)
877                                 devm_kfree(dev, ring->cp_ring->cp_skb);
878                         if (ring->cp_ring && pdata->cq_cnt)
879                                 xgene_enet_free_desc_ring(ring->cp_ring);
880                         xgene_enet_free_desc_ring(ring);
881                 }
882         }
883
884         for (i = 0; i < pdata->rxq_cnt; i++) {
885                 ring = pdata->rx_ring[i];
886                 if (ring) {
887                         if (ring->buf_pool) {
888                                 if (ring->buf_pool->rx_skb)
889                                         devm_kfree(dev, ring->buf_pool->rx_skb);
890                                 xgene_enet_free_desc_ring(ring->buf_pool);
891                         }
892                         xgene_enet_free_desc_ring(ring);
893                 }
894         }
895 }
896
897 static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
898                                  struct xgene_enet_desc_ring *ring)
899 {
900         if ((pdata->enet_id == XGENE_ENET2) &&
901             (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
902                 return true;
903         }
904
905         return false;
906 }
907
908 static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
909                                               struct xgene_enet_desc_ring *ring)
910 {
911         u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
912
913         return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
914 }
915
916 static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
917                         struct net_device *ndev, u32 ring_num,
918                         enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
919 {
920         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
921         struct device *dev = ndev_to_dev(ndev);
922         struct xgene_enet_desc_ring *ring;
923         void *irq_mbox_addr;
924         int size;
925
926         size = xgene_enet_get_ring_size(dev, cfgsize);
927         if (size < 0)
928                 return NULL;
929
930         ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
931                             GFP_KERNEL);
932         if (!ring)
933                 return NULL;
934
935         ring->ndev = ndev;
936         ring->num = ring_num;
937         ring->cfgsize = cfgsize;
938         ring->id = ring_id;
939
940         ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
941                                               GFP_KERNEL | __GFP_ZERO);
942         if (!ring->desc_addr) {
943                 devm_kfree(dev, ring);
944                 return NULL;
945         }
946         ring->size = size;
947
948         if (is_irq_mbox_required(pdata, ring)) {
949                 irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
950                                                     &ring->irq_mbox_dma,
951                                                     GFP_KERNEL | __GFP_ZERO);
952                 if (!irq_mbox_addr) {
953                         dmam_free_coherent(dev, size, ring->desc_addr,
954                                            ring->dma);
955                         devm_kfree(dev, ring);
956                         return NULL;
957                 }
958                 ring->irq_mbox_addr = irq_mbox_addr;
959         }
960
961         ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
962         ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
963         ring = pdata->ring_ops->setup(ring);
964         netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
965                    ring->num, ring->size, ring->id, ring->slots);
966
967         return ring;
968 }
969
970 static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
971 {
972         return (owner << 6) | (bufnum & GENMASK(5, 0));
973 }
974
975 static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
976 {
977         enum xgene_ring_owner owner;
978
979         if (p->enet_id == XGENE_ENET1) {
980                 switch (p->phy_mode) {
981                 case PHY_INTERFACE_MODE_SGMII:
982                         owner = RING_OWNER_ETH0;
983                         break;
984                 default:
985                         owner = (!p->port_id) ? RING_OWNER_ETH0 :
986                                                 RING_OWNER_ETH1;
987                         break;
988                 }
989         } else {
990                 owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
991         }
992
993         return owner;
994 }
995
996 static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
997 {
998         struct device *dev = &pdata->pdev->dev;
999         u32 cpu_bufnum;
1000         int ret;
1001
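            /* An optional "channel" property selects the starting CPU buffer number; otherwise use the platform default */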
1002         ret = device_property_read_u32(dev, "channel", &cpu_bufnum);
1003
1004         return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
1005 }
1006
1007 static int xgene_enet_create_desc_rings(struct net_device *ndev)
1008 {
1009         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1010         struct device *dev = ndev_to_dev(ndev);
1011         struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
1012         struct xgene_enet_desc_ring *buf_pool = NULL;
1013         enum xgene_ring_owner owner;
1014         dma_addr_t dma_exp_bufs;
1015         u8 cpu_bufnum;
1016         u8 eth_bufnum = pdata->eth_bufnum;
1017         u8 bp_bufnum = pdata->bp_bufnum;
1018         u16 ring_num = pdata->ring_num;
1019         __le64 *exp_bufs;
1020         u16 ring_id;
1021         int i, ret, size;
1022
1023         cpu_bufnum = xgene_start_cpu_bufnum(pdata);
1024
1025         for (i = 0; i < pdata->rxq_cnt; i++) {
1026                 /* allocate rx descriptor ring */
1027                 owner = xgene_derive_ring_owner(pdata);
1028                 ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
1029                 rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1030                                                       RING_CFGSIZE_16KB,
1031                                                       ring_id);
1032                 if (!rx_ring) {
1033                         ret = -ENOMEM;
1034                         goto err;
1035                 }
1036
1037                 /* allocate buffer pool for receiving packets */
1038                 owner = xgene_derive_ring_owner(pdata);
1039                 ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
1040                 buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
1041                                                        RING_CFGSIZE_2KB,
1042                                                        ring_id);
1043                 if (!buf_pool) {
1044                         ret = -ENOMEM;
1045                         goto err;
1046                 }
1047
1048                 rx_ring->nbufpool = NUM_BUFPOOL;
1049                 rx_ring->buf_pool = buf_pool;
1050                 rx_ring->irq = pdata->irqs[i];
1051                 buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
1052                                                 sizeof(struct sk_buff *),
1053                                                 GFP_KERNEL);
1054                 if (!buf_pool->rx_skb) {
1055                         ret = -ENOMEM;
1056                         goto err;
1057                 }
1058
1059                 buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
1060                 rx_ring->buf_pool = buf_pool;
1061                 pdata->rx_ring[i] = rx_ring;
1062         }
1063
1064         for (i = 0; i < pdata->txq_cnt; i++) {
1065                 /* allocate tx descriptor ring */
1066                 owner = xgene_derive_ring_owner(pdata);
1067                 ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
1068                 tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1069                                                       RING_CFGSIZE_16KB,
1070                                                       ring_id);
1071                 if (!tx_ring) {
1072                         ret = -ENOMEM;
1073                         goto err;
1074                 }
1075
1076                 size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
1077                 exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
1078                                                GFP_KERNEL | __GFP_ZERO);
1079                 if (!exp_bufs) {
1080                         ret = -ENOMEM;
1081                         goto err;
1082                 }
1083                 tx_ring->exp_bufs = exp_bufs;
1084
1085                 pdata->tx_ring[i] = tx_ring;
1086
1087                 if (!pdata->cq_cnt) {
1088                         cp_ring = pdata->rx_ring[i];
1089                 } else {
1090                         /* allocate tx completion descriptor ring */
1091                         ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
1092                                                          cpu_bufnum++);
1093                         cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1094                                                               RING_CFGSIZE_16KB,
1095                                                               ring_id);
1096                         if (!cp_ring) {
1097                                 ret = -ENOMEM;
1098                                 goto err;
1099                         }
1100
1101                         cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
1102                         cp_ring->index = i;
1103                 }
1104
1105                 cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
1106                                                sizeof(struct sk_buff *),
1107                                                GFP_KERNEL);
1108                 if (!cp_ring->cp_skb) {
1109                         ret = -ENOMEM;
1110                         goto err;
1111                 }
1112
1113                 size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
1114                 cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
1115                                                       size, GFP_KERNEL);
1116                 if (!cp_ring->frag_dma_addr) {
1117                         devm_kfree(dev, cp_ring->cp_skb);
1118                         ret = -ENOMEM;
1119                         goto err;
1120                 }
1121
1122                 tx_ring->cp_ring = cp_ring;
1123                 tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
1124         }
1125
1126         pdata->ring_ops->coalesce(pdata->tx_ring[0]);
1127         pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
1128
1129         return 0;
1130
1131 err:
1132         xgene_enet_free_desc_rings(pdata);
1133         return ret;
1134 }
1135
1136 static struct rtnl_link_stats64 *xgene_enet_get_stats64(
1137                         struct net_device *ndev,
1138                         struct rtnl_link_stats64 *storage)
1139 {
1140         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1141         struct rtnl_link_stats64 *stats = &pdata->stats;
1142         struct xgene_enet_desc_ring *ring;
1143         int i;
1144
1145         memset(stats, 0, sizeof(struct rtnl_link_stats64));
1146         for (i = 0; i < pdata->txq_cnt; i++) {
1147                 ring = pdata->tx_ring[i];
1148                 if (ring) {
1149                         stats->tx_packets += ring->tx_packets;
1150                         stats->tx_bytes += ring->tx_bytes;
1151                 }
1152         }
1153
1154         for (i = 0; i < pdata->rxq_cnt; i++) {
1155                 ring = pdata->rx_ring[i];
1156                 if (ring) {
1157                         stats->rx_packets += ring->rx_packets;
1158                         stats->rx_bytes += ring->rx_bytes;
1159                         stats->rx_errors += ring->rx_length_errors +
1160                                 ring->rx_crc_errors +
1161                                 ring->rx_frame_errors +
1162                                 ring->rx_fifo_errors;
1163                         stats->rx_dropped += ring->rx_dropped;
1164                 }
1165         }
1166         memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
1167
1168         return storage;
1169 }
1170
1171 static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
1172 {
1173         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1174         int ret;
1175
1176         ret = eth_mac_addr(ndev, addr);
1177         if (ret)
1178                 return ret;
1179         pdata->mac_ops->set_mac_addr(pdata);
1180
1181         return ret;
1182 }
1183
1184 static const struct net_device_ops xgene_ndev_ops = {
1185         .ndo_open = xgene_enet_open,
1186         .ndo_stop = xgene_enet_close,
1187         .ndo_start_xmit = xgene_enet_start_xmit,
1188         .ndo_tx_timeout = xgene_enet_timeout,
1189         .ndo_get_stats64 = xgene_enet_get_stats64,
1190         .ndo_change_mtu = eth_change_mtu,
1191         .ndo_set_mac_address = xgene_enet_set_mac_address,
1192 };
1193
1194 #ifdef CONFIG_ACPI
1195 static void xgene_get_port_id_acpi(struct device *dev,
1196                                   struct xgene_enet_pdata *pdata)
1197 {
1198         acpi_status status;
1199         u64 temp;
1200
1201         status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
1202         if (ACPI_FAILURE(status)) {
1203                 pdata->port_id = 0;
1204         } else {
1205                 pdata->port_id = temp;
1206         }
1209 }
1210 #endif
1211
1212 static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
1213 {
1214         u32 id = 0;
1215
1216         of_property_read_u32(dev->of_node, "port-id", &id);
1217
1218         pdata->port_id = id & BIT(0);
1221 }
1222
1223 static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
1224 {
1225         struct device *dev = &pdata->pdev->dev;
1226         int delay, ret;
1227
1228         ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
1229         if (ret) {
1230                 pdata->tx_delay = 4;
1231                 return 0;
1232         }
1233
1234         if (delay < 0 || delay > 7) {
1235                 dev_err(dev, "Invalid tx-delay specified\n");
1236                 return -EINVAL;
1237         }
1238
1239         pdata->tx_delay = delay;
1240
1241         return 0;
1242 }
1243
1244 static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
1245 {
1246         struct device *dev = &pdata->pdev->dev;
1247         int delay, ret;
1248
1249         ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
1250         if (ret) {
1251                 pdata->rx_delay = 2;
1252                 return 0;
1253         }
1254
1255         if (delay < 0 || delay > 7) {
1256                 dev_err(dev, "Invalid rx-delay specified\n");
1257                 return -EINVAL;
1258         }
1259
1260         pdata->rx_delay = delay;
1261
1262         return 0;
1263 }
1264
1265 static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
1266 {
1267         struct platform_device *pdev = pdata->pdev;
1268         struct device *dev = &pdev->dev;
1269         int i, ret, max_irqs;
1270
1271         if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1272                 max_irqs = 1;
1273         else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
1274                 max_irqs = 2;
1275         else
1276                 max_irqs = XGENE_MAX_ENET_IRQ;
1277
1278         for (i = 0; i < max_irqs; i++) {
1279                 ret = platform_get_irq(pdev, i);
1280                 if (ret <= 0) {
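                             /* XGMII ports can run with fewer IRQs; shrink the queue counts to match what the platform provides */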
1281                         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1282                                 max_irqs = i;
1283                                 pdata->rxq_cnt = max_irqs / 2;
1284                                 pdata->txq_cnt = max_irqs / 2;
1285                                 pdata->cq_cnt = max_irqs / 2;
1286                                 break;
1287                         }
1288                         dev_err(dev, "Unable to get ENET IRQ\n");
1289                         ret = ret ? : -ENXIO;
1290                         return ret;
1291                 }
1292                 pdata->irqs[i] = ret;
1293         }
1294
1295         return 0;
1296 }
1297
1298 static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
1299 {
1300         int ret;
1301
1302         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
1303                 return 0;
1304
1305         if (!IS_ENABLED(CONFIG_MDIO_XGENE))
1306                 return 0;
1307
1308         ret = xgene_enet_phy_connect(pdata->ndev);
1309         if (!ret)
1310                 pdata->mdio_driver = true;
1311
1312         return 0;
1313 }
1314
1315 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
1316 {
1317         struct platform_device *pdev;
1318         struct net_device *ndev;
1319         struct device *dev;
1320         struct resource *res;
1321         void __iomem *base_addr;
1322         u32 offset;
1323         int ret = 0;
1324
1325         pdev = pdata->pdev;
1326         dev = &pdev->dev;
1327         ndev = pdata->ndev;
1328
1329         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
1330         if (!res) {
1331                 dev_err(dev, "Resource enet_csr not defined\n");
1332                 return -ENODEV;
1333         }
1334         pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
1335         if (!pdata->base_addr) {
1336                 dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
1337                 return -ENOMEM;
1338         }
1339
1340         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
1341         if (!res) {
1342                 dev_err(dev, "Resource ring_csr not defined\n");
1343                 return -ENODEV;
1344         }
1345         pdata->ring_csr_addr = devm_ioremap(dev, res->start,
1346                                                         resource_size(res));
1347         if (!pdata->ring_csr_addr) {
1348                 dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
1349                 return -ENOMEM;
1350         }
1351
1352         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
1353         if (!res) {
1354                 dev_err(dev, "Resource ring_cmd not defined\n");
1355                 return -ENODEV;
1356         }
1357         pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
1358                                                         resource_size(res));
1359         if (!pdata->ring_cmd_addr) {
1360                 dev_err(dev, "Unable to retrieve ENET Ring command region\n");
1361                 return -ENOMEM;
1362         }
1363
1364         if (dev->of_node)
1365                 xgene_get_port_id_dt(dev, pdata);
1366 #ifdef CONFIG_ACPI
1367         else
1368                 xgene_get_port_id_acpi(dev, pdata);
1369 #endif
1370
1371         if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
1372                 eth_hw_addr_random(ndev);
1373
1374         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
1375
1376         pdata->phy_mode = device_get_phy_mode(dev);
1377         if (pdata->phy_mode < 0) {
1378                 dev_err(dev, "Unable to get phy-connection-type\n");
1379                 return pdata->phy_mode;
1380         }
1381         if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
1382             pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
1383             pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
1384                 dev_err(dev, "Incorrect phy-connection-type specified\n");
1385                 return -ENODEV;
1386         }
1387
1388         ret = xgene_get_tx_delay(pdata);
1389         if (ret)
1390                 return ret;
1391
1392         ret = xgene_get_rx_delay(pdata);
1393         if (ret)
1394                 return ret;
1395
1396         ret = xgene_enet_get_irqs(pdata);
1397         if (ret)
1398                 return ret;
1399
1400         ret = xgene_enet_check_phy_handle(pdata);
1401         if (ret)
1402                 return ret;
1403
1404         pdata->clk = devm_clk_get(&pdev->dev, NULL);
1405         if (IS_ERR(pdata->clk)) {
1406                 /* Firmware may have set up the clock already. */
1407                 dev_info(dev, "clocks have been setup already\n");
1408         }
1409
1410         if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
1411                 base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
1412         else
1413                 base_addr = pdata->base_addr;
1414         pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
1415         pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
1416         pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
1417         pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
1418         if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
1419             pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
1420                 pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
1421                 offset = (pdata->enet_id == XGENE_ENET1) ?
1422                           BLOCK_ETH_MAC_CSR_OFFSET :
1423                           X2_BLOCK_ETH_MAC_CSR_OFFSET;
1424                 pdata->mcx_mac_csr_addr = base_addr + offset;
1425         } else {
1426                 pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
1427                 pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
1428         }
1429         pdata->rx_buff_cnt = NUM_PKT_BUF;
1430
1431         return 0;
1432 }
1433
1434 static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
1435 {
1436         struct xgene_enet_cle *enet_cle = &pdata->cle;
1437         struct net_device *ndev = pdata->ndev;
1438         struct xgene_enet_desc_ring *buf_pool;
1439         u16 dst_ring_num;
1440         int i, ret;
1441
1442         ret = pdata->port_ops->reset(pdata);
1443         if (ret)
1444                 return ret;
1445
1446         ret = xgene_enet_create_desc_rings(ndev);
1447         if (ret) {
1448                 netdev_err(ndev, "Error in ring configuration\n");
1449                 return ret;
1450         }
1451
1452         /* setup buffer pool */
1453         for (i = 0; i < pdata->rxq_cnt; i++) {
1454                 buf_pool = pdata->rx_ring[i]->buf_pool;
1455                 xgene_enet_init_bufpool(buf_pool);
1456                 ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
1457                 if (ret) {
1458                         xgene_enet_delete_desc_rings(pdata);
1459                         return ret;
1460                 }
1461         }
1462
1463         dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
1464         buf_pool = pdata->rx_ring[0]->buf_pool;
1465         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1466                 /* Initialize and enable the pre-classifier tree */
1467                 enet_cle->max_nodes = 512;
1468                 enet_cle->max_dbptrs = 1024;
1469                 enet_cle->parsers = 3;
1470                 enet_cle->active_parser = PARSER_ALL;
1471                 enet_cle->ptree.start_node = 0;
1472                 enet_cle->ptree.start_dbptr = 0;
1473                 enet_cle->jump_bytes = 8;
1474                 ret = pdata->cle_ops->cle_init(pdata);
1475                 if (ret) {
1476                         netdev_err(ndev, "Preclass Tree init error\n");
1477                         return ret;
1478                 }
1479         } else {
1480                 pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
1481         }
1482
1483         pdata->phy_speed = SPEED_UNKNOWN;
1484         pdata->mac_ops->init(pdata);
1485
1486         return ret;
1487 }
1488
1489 static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
1490 {
1491         switch (pdata->phy_mode) {
1492         case PHY_INTERFACE_MODE_RGMII:
1493                 pdata->mac_ops = &xgene_gmac_ops;
1494                 pdata->port_ops = &xgene_gport_ops;
1495                 pdata->rm = RM3;
1496                 pdata->rxq_cnt = 1;
1497                 pdata->txq_cnt = 1;
1498                 pdata->cq_cnt = 0;
1499                 break;
1500         case PHY_INTERFACE_MODE_SGMII:
1501                 pdata->mac_ops = &xgene_sgmac_ops;
1502                 pdata->port_ops = &xgene_sgport_ops;
1503                 pdata->rm = RM1;
1504                 pdata->rxq_cnt = 1;
1505                 pdata->txq_cnt = 1;
1506                 pdata->cq_cnt = 1;
1507                 break;
1508         default:
1509                 pdata->mac_ops = &xgene_xgmac_ops;
1510                 pdata->port_ops = &xgene_xgport_ops;
1511                 pdata->cle_ops = &xgene_cle3in_ops;
1512                 pdata->rm = RM0;
1513                 if (!pdata->rxq_cnt) {
1514                         pdata->rxq_cnt = XGENE_NUM_RX_RING;
1515                         pdata->txq_cnt = XGENE_NUM_TX_RING;
1516                         pdata->cq_cnt = XGENE_NUM_TXC_RING;
1517                 }
1518                 break;
1519         }
1520
1521         if (pdata->enet_id == XGENE_ENET1) {
1522                 switch (pdata->port_id) {
1523                 case 0:
1524                         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1525                                 pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1526                                 pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1527                                 pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1528                                 pdata->ring_num = START_RING_NUM_0;
1529                         } else {
1530                                 pdata->cpu_bufnum = START_CPU_BUFNUM_0;
1531                                 pdata->eth_bufnum = START_ETH_BUFNUM_0;
1532                                 pdata->bp_bufnum = START_BP_BUFNUM_0;
1533                                 pdata->ring_num = START_RING_NUM_0;
1534                         }
1535                         break;
1536                 case 1:
1537                         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1538                                 pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
1539                                 pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
1540                                 pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
1541                                 pdata->ring_num = XG_START_RING_NUM_1;
1542                         } else {
1543                                 pdata->cpu_bufnum = START_CPU_BUFNUM_1;
1544                                 pdata->eth_bufnum = START_ETH_BUFNUM_1;
1545                                 pdata->bp_bufnum = START_BP_BUFNUM_1;
1546                                 pdata->ring_num = START_RING_NUM_1;
1547                         }
1548                         break;
1549                 default:
1550                         break;
1551                 }
1552                 pdata->ring_ops = &xgene_ring1_ops;
1553         } else {
1554                 switch (pdata->port_id) {
1555                 case 0:
1556                         pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1557                         pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1558                         pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1559                         pdata->ring_num = X2_START_RING_NUM_0;
1560                         break;
1561                 case 1:
1562                         pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
1563                         pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
1564                         pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
1565                         pdata->ring_num = X2_START_RING_NUM_1;
1566                         break;
1567                 default:
1568                         break;
1569                 }
1570                 pdata->rm = RM0;
1571                 pdata->ring_ops = &xgene_ring2_ops;
1572         }
1573 }
1574
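/* Register one NAPI context per RX ring and, when dedicated TX completion
 * rings are used (cq_cnt != 0), one per TX completion ring.
 */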
1575 static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
1576 {
1577         struct napi_struct *napi;
1578         int i;
1579
1580         for (i = 0; i < pdata->rxq_cnt; i++) {
1581                 napi = &pdata->rx_ring[i]->napi;
1582                 netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1583                                NAPI_POLL_WEIGHT);
1584         }
1585
1586         for (i = 0; i < pdata->cq_cnt; i++) {
1587                 napi = &pdata->tx_ring[i]->cp_ring->napi;
1588                 netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1589                                NAPI_POLL_WEIGHT);
1590         }
1591 }
1592
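/* Probe: allocate the net_device, resolve the ENET generation from the
 * DT/ACPI match data, map resources, select the ops tables, bring up the
 * hardware, wire up link handling (delayed work or MDIO/PHY) and register
 * the netdev.
 */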
1593 static int xgene_enet_probe(struct platform_device *pdev)
1594 {
1595         struct net_device *ndev;
1596         struct xgene_enet_pdata *pdata;
1597         struct device *dev = &pdev->dev;
1598         void (*link_state)(struct work_struct *);
1599         const struct of_device_id *of_id;
1600         int ret;
1601
1602         ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
1603                                   XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
1604         if (!ndev)
1605                 return -ENOMEM;
1606
1607         pdata = netdev_priv(ndev);
1608
1609         pdata->pdev = pdev;
1610         pdata->ndev = ndev;
1611         SET_NETDEV_DEV(ndev, dev);
1612         platform_set_drvdata(pdev, pdata);
1613         ndev->netdev_ops = &xgene_ndev_ops;
1614         xgene_enet_set_ethtool_ops(ndev);
1615         ndev->features |= NETIF_F_IP_CSUM |
1616                           NETIF_F_GSO |
1617                           NETIF_F_GRO |
1618                           NETIF_F_SG;
1619
1620         of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
1621         if (of_id) {
1622                 pdata->enet_id = (enum xgene_enet_id)of_id->data;
1623         }
1624 #ifdef CONFIG_ACPI
1625         else {
1626                 const struct acpi_device_id *acpi_id;
1627
1628                 acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
1629                 if (acpi_id)
1630                         pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
1631         }
1632 #endif
1633         if (!pdata->enet_id) {
1634                 free_netdev(ndev);
1635                 return -ENODEV;
1636         }
1637
1638         ret = xgene_enet_get_resources(pdata);
1639         if (ret)
1640                 goto err;
1641
1642         xgene_enet_setup_ops(pdata);
1643
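        /* TSO is only advertised for the 10G (XGMII) interface;
         * XGENE_ENET_MSS seeds pdata->mss, which the MAC init code is
         * expected to program into hardware.
         */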
1644         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1645                 ndev->features |= NETIF_F_TSO;
1646                 pdata->mss = XGENE_ENET_MSS;
1647         }
1648         ndev->hw_features = ndev->features;
1649
1650         ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
1651         if (ret) {
1652                 netdev_err(ndev, "No usable DMA configuration\n");
1653                 goto err;
1654         }
1655
1656         ret = xgene_enet_init_hw(pdata);
1657         if (ret)
1658                 goto err;       /* ndev has not been registered yet */
1659
1660         link_state = pdata->mac_ops->link_state;
1661         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1662                 INIT_DELAYED_WORK(&pdata->link_work, link_state);
1663         } else if (!pdata->mdio_driver) {
1664                 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1665                         ret = xgene_enet_mdio_config(pdata);
1666                 else
1667                         INIT_DELAYED_WORK(&pdata->link_work, link_state);
1668         }
1669         if (ret)
1670                 goto err;
1671
1672         xgene_enet_napi_add(pdata);
1673         ret = register_netdev(ndev);
1674         if (ret) {
1675                 netdev_err(ndev, "Failed to register netdev\n");
1676                 goto err;
1677         }
1678
1679         return 0;
1680
1683 err:
1684         free_netdev(ndev);
1685         return ret;
1686 }
1687
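/* Tear down in roughly the reverse order of probe: close the interface if
 * it is running, detach the PHY or MDIO bus, unregister the netdev, shut
 * down the port and delete the descriptor rings before freeing the
 * net_device.
 */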
1688 static int xgene_enet_remove(struct platform_device *pdev)
1689 {
1690         struct xgene_enet_pdata *pdata;
1691         const struct xgene_mac_ops *mac_ops;
1692         struct net_device *ndev;
1693
1694         pdata = platform_get_drvdata(pdev);
1695         mac_ops = pdata->mac_ops;
1696         ndev = pdata->ndev;
1697
1698         rtnl_lock();
1699         if (netif_running(ndev))
1700                 dev_close(ndev);
1701         rtnl_unlock();
1702
1703         if (pdata->mdio_driver)
1704                 xgene_enet_phy_disconnect(pdata);
1705         else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1706                 xgene_enet_mdio_remove(pdata);
1707
1708         unregister_netdev(ndev);
1709         pdata->port_ops->shutdown(pdata);
1710         xgene_enet_delete_desc_rings(pdata);
1711         free_netdev(ndev);
1712
1713         return 0;
1714 }
1715
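/* Shutdown reuses the remove path once the driver data and net_device are
 * known to exist.
 */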
1716 static void xgene_enet_shutdown(struct platform_device *pdev)
1717 {
1718         struct xgene_enet_pdata *pdata;
1719
1720         pdata = platform_get_drvdata(pdev);
1721         if (!pdata)
1722                 return;
1723
1724         if (!pdata->ndev)
1725                 return;
1726
1727         xgene_enet_remove(pdev);
1728 }
1729
1730 #ifdef CONFIG_ACPI
1731 static const struct acpi_device_id xgene_enet_acpi_match[] = {
1732         { "APMC0D05", XGENE_ENET1},
1733         { "APMC0D30", XGENE_ENET1},
1734         { "APMC0D31", XGENE_ENET1},
1735         { "APMC0D3F", XGENE_ENET1},
1736         { "APMC0D26", XGENE_ENET2},
1737         { "APMC0D25", XGENE_ENET2},
1738         { }
1739 };
1740 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
1741 #endif
1742
1743 #ifdef CONFIG_OF
1744 static const struct of_device_id xgene_enet_of_match[] = {
1745         {.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
1746         {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
1747         {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
1748         {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
1749         {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
1750         {},
1751 };
1752
1753 MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
1754 #endif
1755
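/* The driver matches either an OF compatible string or an ACPI _HID; both
 * tables carry the ENET generation in their data/driver_data field.
 */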
1756 static struct platform_driver xgene_enet_driver = {
1757         .driver = {
1758                    .name = "xgene-enet",
1759                    .of_match_table = of_match_ptr(xgene_enet_of_match),
1760                    .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
1761         },
1762         .probe = xgene_enet_probe,
1763         .remove = xgene_enet_remove,
1764         .shutdown = xgene_enet_shutdown,
1765 };
1766
1767 module_platform_driver(xgene_enet_driver);
1768
1769 MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
1770 MODULE_VERSION(XGENE_DRV_VERSION);
1771 MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
1772 MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
1773 MODULE_LICENSE("GPL");