/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>

#include <asm/xen/page.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

/* Module parameters */
static unsigned int xennet_max_queues;
module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
                 "Maximum number of queues per virtual interface");

static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
        int pull_to;
};

#define NETFRONT_SKB_CB(skb)    ((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF       0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)

/* Minimum number of Rx slots (includes slot for GSO metadata). */
#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

struct netfront_stats {
        u64                     rx_packets;
        u64                     tx_packets;
        u64                     rx_bytes;
        u64                     tx_bytes;
        struct u64_stats_sync   syncp;
};

struct netfront_info;

struct netfront_queue {
        unsigned int id; /* Queue ID, 0-based */
        char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
        struct netfront_info *info;

        struct napi_struct napi;

        /* Split event channel support: tx_* == rx_* when using
         * a single event channel.
         */
        unsigned int tx_evtchn, rx_evtchn;
        unsigned int tx_irq, rx_irq;
        /* Only used when split event channel support is enabled */
        char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
        char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */

        spinlock_t   tx_lock;
        struct xen_netif_tx_front_ring tx;
        int tx_ring_ref;

        /*
         * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
         * are linked from tx_skb_freelist through skb_entry.link.
         *
         * NB. Freelist index entries are always going to be less than
         * PAGE_OFFSET, whereas pointers to skbs will always be equal or
         * greater than PAGE_OFFSET: we use this property to distinguish
         * them.
         */
        union skb_entry {
                struct sk_buff *skb;
                unsigned long link;
        } tx_skbs[NET_TX_RING_SIZE];
        grant_ref_t gref_tx_head;
        grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
        struct page *grant_tx_page[NET_TX_RING_SIZE];
        unsigned tx_skb_freelist;

        spinlock_t   rx_lock ____cacheline_aligned_in_smp;
        struct xen_netif_rx_front_ring rx;
        int rx_ring_ref;

        struct timer_list rx_refill_timer;

        struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
        grant_ref_t gref_rx_head;
        grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
};

struct netfront_info {
        struct list_head list;
        struct net_device *netdev;

        struct xenbus_device *xbdev;

        /* Multi-queue support */
        struct netfront_queue *queues;

        /* Statistics */
        struct netfront_stats __percpu *stats;

        atomic_t rx_gso_checksum_fixup;
};

struct netfront_rx_info {
        struct xen_netif_rx_response rx;
        struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

static void skb_entry_set_link(union skb_entry *list, unsigned short id)
{
        list->link = id;
}

static int skb_entry_is_link(const union skb_entry *list)
{
        BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
        return (unsigned long)list->skb < PAGE_OFFSET;
}

/*
 * Access macros for acquiring and freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
                               unsigned short id)
{
        skb_entry_set_link(&list[id], *head);
        *head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
                                           union skb_entry *list)
{
        unsigned int id = *head;
        *head = list[id].link;
        return id;
}
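
/*
 * Illustrative example (added commentary, not in the original source): if
 * tx_skb_freelist == 3 and tx_skbs[3].link == 7, get_id_from_freelist()
 * hands out slot 3 and leaves tx_skb_freelist == 7; a later
 * add_id_to_freelist(..., 3) pushes slot 3 back onto the front of the
 * list. The index/pointer union is unambiguous because slot indices are
 * far smaller than PAGE_OFFSET (see skb_entry_is_link() above).
 */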

static int xennet_rxidx(RING_IDX idx)
{
        return idx & (NET_RX_RING_SIZE - 1);
}
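
/*
 * Added note: NET_RX_RING_SIZE comes from __CONST_RING_SIZE and is a power
 * of two, so the mask above is equivalent to idx % NET_RX_RING_SIZE. E.g.
 * with 4 KiB pages the Rx ring has 256 entries and xennet_rxidx(259) == 3.
 */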

static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
                                         RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        struct sk_buff *skb = queue->rx_skbs[i];
        queue->rx_skbs[i] = NULL;
        return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
                                            RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        grant_ref_t ref = queue->grant_rx_ref[i];
        queue->grant_rx_ref[i] = GRANT_INVALID_REF;
        return ref;
}

#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while (0)
#endif

static bool xennet_can_sg(struct net_device *dev)
{
        return dev->features & NETIF_F_SG;
}

static void rx_refill_timeout(unsigned long data)
{
        struct netfront_queue *queue = (struct netfront_queue *)data;
        napi_schedule(&queue->napi);
}

static int netfront_tx_slot_available(struct netfront_queue *queue)
{
        return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
                (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
}
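
/*
 * Added note: the queue is treated as full once fewer than
 * MAX_SKB_FRAGS + 2 free slots remain, because a worst-case skb can need
 * one slot per fragment plus one for the linear header and one for the
 * optional GSO extra-info request.
 */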

static void xennet_maybe_wake_tx(struct netfront_queue *queue)
{
        struct net_device *dev = queue->info->netdev;
        struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);

        if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
            netfront_tx_slot_available(queue) &&
            likely(netif_running(dev)))
                netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}

static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
{
        struct sk_buff *skb;
        struct page *page;

        skb = __netdev_alloc_skb(queue->info->netdev,
                                 RX_COPY_THRESHOLD + NET_IP_ALIGN,
                                 GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;

        page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
        if (!page) {
                kfree_skb(skb);
                return NULL;
        }
        skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);

        /* Align IP header to a 16-byte boundary */
        skb_reserve(skb, NET_IP_ALIGN);
        skb->dev = queue->info->netdev;

        return skb;
}
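
/*
 * Added note: each Rx buffer is an skb with an empty
 * RX_COPY_THRESHOLD-byte linear area plus one full page attached as frag 0.
 * The backend writes the packet into the granted page; on receive, up to
 * RX_COPY_THRESHOLD bytes are later pulled into the linear area (see the
 * pull_to handling in xennet_poll() and handle_incoming_queue()).
 */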

static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
        RING_IDX req_prod = queue->rx.req_prod_pvt;
        int notify;

        if (unlikely(!netif_carrier_ok(queue->info->netdev)))
                return;

        for (req_prod = queue->rx.req_prod_pvt;
             req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
             req_prod++) {
                struct sk_buff *skb;
                unsigned short id;
                grant_ref_t ref;
                unsigned long pfn;
                struct xen_netif_rx_request *req;

                skb = xennet_alloc_one_rx_buffer(queue);
                if (!skb)
                        break;

                id = xennet_rxidx(req_prod);

                BUG_ON(queue->rx_skbs[id]);
                queue->rx_skbs[id] = skb;

                ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
                BUG_ON((signed short)ref < 0);
                queue->grant_rx_ref[id] = ref;

                pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));

                req = RING_GET_REQUEST(&queue->rx, req_prod);
                gnttab_grant_foreign_access_ref(ref,
                                                queue->info->xbdev->otherend_id,
                                                pfn_to_mfn(pfn),
                                                0);

                req->id = id;
                req->gref = ref;
        }

        queue->rx.req_prod_pvt = req_prod;

        /* Not enough requests? Try again later. */
        if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
                mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
                return;
        }

        wmb();          /* barrier so backend sees requests */

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
        if (notify)
                notify_remote_via_irq(queue->rx_irq);
}

static int xennet_open(struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);
        unsigned int num_queues = dev->real_num_tx_queues;
        unsigned int i = 0;
        struct netfront_queue *queue = NULL;

        for (i = 0; i < num_queues; ++i) {
                queue = &np->queues[i];
                napi_enable(&queue->napi);

                spin_lock_bh(&queue->rx_lock);
                if (netif_carrier_ok(dev)) {
                        xennet_alloc_rx_buffers(queue);
                        queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
                        if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
                                napi_schedule(&queue->napi);
                }
                spin_unlock_bh(&queue->rx_lock);
        }

        netif_tx_start_all_queues(dev);

        return 0;
}

static void xennet_tx_buf_gc(struct netfront_queue *queue)
{
        RING_IDX cons, prod;
        unsigned short id;
        struct sk_buff *skb;

        BUG_ON(!netif_carrier_ok(queue->info->netdev));

        do {
                prod = queue->tx.sring->rsp_prod;
                rmb(); /* Ensure we see responses up to 'rp'. */

                for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
                        struct xen_netif_tx_response *txrsp;

                        txrsp = RING_GET_RESPONSE(&queue->tx, cons);
                        if (txrsp->status == XEN_NETIF_RSP_NULL)
                                continue;

                        id  = txrsp->id;
                        skb = queue->tx_skbs[id].skb;
                        if (unlikely(gnttab_query_foreign_access(
                                queue->grant_tx_ref[id]) != 0)) {
                                pr_alert("%s: warning -- grant still in use by backend domain\n",
                                         __func__);
                                BUG();
                        }
                        gnttab_end_foreign_access_ref(
                                queue->grant_tx_ref[id], GNTMAP_readonly);
                        gnttab_release_grant_reference(
                                &queue->gref_tx_head, queue->grant_tx_ref[id]);
                        queue->grant_tx_ref[id] = GRANT_INVALID_REF;
                        queue->grant_tx_page[id] = NULL;
                        add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
                        dev_kfree_skb_irq(skb);
                }

                queue->tx.rsp_cons = prod;

                /*
                 * Set a new event, then check for race with update of tx_cons.
                 * Note that it is essential to schedule a callback, no matter
                 * how few buffers are pending. Even if there is space in the
                 * transmit ring, higher layers may be blocked because too much
                 * data is outstanding: in such cases notification from Xen is
                 * likely to be the only kick that we'll get.
                 */
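                /*
                 * Added worked example: if prod == 10 and req_prod == 14,
                 * rsp_event below becomes 10 + (4 >> 1) + 1 == 13, so the
                 * backend notifies us again once about half of the
                 * outstanding requests have completed.
                 */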
                queue->tx.sring->rsp_event =
                        prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
                mb();           /* update shared area */
        } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));

        xennet_maybe_wake_tx(queue);
}

static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
                              struct xen_netif_tx_request *tx)
{
        char *data = skb->data;
        unsigned long mfn;
        RING_IDX prod = queue->tx.req_prod_pvt;
        int frags = skb_shinfo(skb)->nr_frags;
        unsigned int offset = offset_in_page(data);
        unsigned int len = skb_headlen(skb);
        unsigned int id;
        grant_ref_t ref;
        int i;

        /* While the header overlaps a page boundary (including being
           larger than a page), split it into page-sized chunks. */
        while (len > PAGE_SIZE - offset) {
                tx->size = PAGE_SIZE - offset;
                tx->flags |= XEN_NETTXF_more_data;
                len -= tx->size;
                data += tx->size;
                offset = 0;

                id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
                queue->tx_skbs[id].skb = skb_get(skb);
                tx = RING_GET_REQUEST(&queue->tx, prod++);
                tx->id = id;
                ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
                BUG_ON((signed short)ref < 0);

                mfn = virt_to_mfn(data);
                gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
                                                mfn, GNTMAP_readonly);

                queue->grant_tx_page[id] = virt_to_page(data);
                tx->gref = queue->grant_tx_ref[id] = ref;
                tx->offset = offset;
                tx->size = len;
                tx->flags = 0;
        }

        /* Grant backend access to each skb fragment page. */
        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
                struct page *page = skb_frag_page(frag);

                len = skb_frag_size(frag);
                offset = frag->page_offset;

                /* Skip unused pages at the start of the frag */
                page += offset >> PAGE_SHIFT;
                offset &= ~PAGE_MASK;

                while (len > 0) {
                        unsigned long bytes;

                        bytes = PAGE_SIZE - offset;
                        if (bytes > len)
                                bytes = len;

                        tx->flags |= XEN_NETTXF_more_data;

                        id = get_id_from_freelist(&queue->tx_skb_freelist,
                                                  queue->tx_skbs);
                        queue->tx_skbs[id].skb = skb_get(skb);
                        tx = RING_GET_REQUEST(&queue->tx, prod++);
                        tx->id = id;
                        ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
                        BUG_ON((signed short)ref < 0);

                        mfn = pfn_to_mfn(page_to_pfn(page));
                        gnttab_grant_foreign_access_ref(ref,
                                                        queue->info->xbdev->otherend_id,
                                                        mfn, GNTMAP_readonly);

                        queue->grant_tx_page[id] = page;
                        tx->gref = queue->grant_tx_ref[id] = ref;
                        tx->offset = offset;
                        tx->size = bytes;
                        tx->flags = 0;

                        offset += bytes;
                        len -= bytes;

                        /* Advance to the next page of a compound page */
                        if (offset == PAGE_SIZE && len) {
                                BUG_ON(!PageCompound(page));
                                page++;
                                offset = 0;
                        }
                }
        }

        queue->tx.req_prod_pvt = prod;
}

/*
 * Count how many ring slots are required to send the frags of this
 * skb. Each frag might be a compound page.
 */
static int xennet_count_skb_frag_slots(struct sk_buff *skb)
{
        int i, frags = skb_shinfo(skb)->nr_frags;
        int pages = 0;

        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
                unsigned long size = skb_frag_size(frag);
                unsigned long offset = frag->page_offset;

                /* Skip unused pages at the start of the frag */
                offset &= ~PAGE_MASK;

                pages += PFN_UP(offset + size);
        }

        return pages;
}
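
/*
 * Added worked example: with 4 KiB pages, a 5000-byte frag whose in-page
 * offset is 100 needs PFN_UP(100 + 5000) == PFN_UP(5100) == 2 ring slots,
 * since the data straddles two pages.
 */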

static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
                               void *accel_priv, select_queue_fallback_t fallback)
{
        unsigned int num_queues = dev->real_num_tx_queues;
        u32 hash;
        u16 queue_idx;

        /* First, check if there is only one queue */
        if (num_queues == 1) {
                queue_idx = 0;
        } else {
                hash = skb_get_hash(skb);
                queue_idx = hash % num_queues;
        }

        return queue_idx;
}

static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        unsigned short id;
        struct netfront_info *np = netdev_priv(dev);
        struct netfront_stats *stats = this_cpu_ptr(np->stats);
        struct xen_netif_tx_request *tx;
        char *data = skb->data;
        RING_IDX i;
        grant_ref_t ref;
        unsigned long mfn;
        int notify;
        int slots;
        unsigned int offset = offset_in_page(data);
        unsigned int len = skb_headlen(skb);
        unsigned long flags;
        struct netfront_queue *queue = NULL;
        unsigned int num_queues = dev->real_num_tx_queues;
        u16 queue_index;

        /* Drop the packet if no queues are set up */
        if (num_queues < 1)
                goto drop;
        /* Determine which queue to transmit this SKB on */
        queue_index = skb_get_queue_mapping(skb);
        queue = &np->queues[queue_index];

        /* If skb->len is too big for wire format, drop skb and alert
         * user about misconfiguration.
         */
        if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
                net_alert_ratelimited(
                        "xennet: skb->len = %u, too big for wire format\n",
                        skb->len);
                goto drop;
        }

        slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
                xennet_count_skb_frag_slots(skb);
        if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
                net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
                                    slots, skb->len);
                if (skb_linearize(skb))
                        goto drop;
                data = skb->data;
                offset = offset_in_page(data);
                len = skb_headlen(skb);
        }

        spin_lock_irqsave(&queue->tx_lock, flags);

        if (unlikely(!netif_carrier_ok(dev) ||
                     (slots > 1 && !xennet_can_sg(dev)) ||
                     netif_needs_gso(dev, skb, netif_skb_features(skb)))) {
                spin_unlock_irqrestore(&queue->tx_lock, flags);
                goto drop;
        }

        i = queue->tx.req_prod_pvt;

        id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
        queue->tx_skbs[id].skb = skb;

        tx = RING_GET_REQUEST(&queue->tx, i);

        tx->id   = id;
        ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
        BUG_ON((signed short)ref < 0);
        mfn = virt_to_mfn(data);
        gnttab_grant_foreign_access_ref(
                ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
        queue->grant_tx_page[id] = virt_to_page(data);
        tx->gref = queue->grant_tx_ref[id] = ref;
        tx->offset = offset;
        tx->size = len;

        tx->flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                /* local packet? */
                tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
        else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                /* remote but checksummed. */
                tx->flags |= XEN_NETTXF_data_validated;

        if (skb_shinfo(skb)->gso_size) {
                struct xen_netif_extra_info *gso;

                gso = (struct xen_netif_extra_info *)
                        RING_GET_REQUEST(&queue->tx, ++i);

                tx->flags |= XEN_NETTXF_extra_info;

                gso->u.gso.size = skb_shinfo(skb)->gso_size;
                gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
                        XEN_NETIF_GSO_TYPE_TCPV6 :
                        XEN_NETIF_GSO_TYPE_TCPV4;
                gso->u.gso.pad = 0;
                gso->u.gso.features = 0;

                gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
                gso->flags = 0;
        }

        queue->tx.req_prod_pvt = i + 1;

        xennet_make_frags(skb, queue, tx);
        tx->size = skb->len;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
        if (notify)
                notify_remote_via_irq(queue->tx_irq);

        u64_stats_update_begin(&stats->syncp);
        stats->tx_bytes += skb->len;
        stats->tx_packets++;
        u64_stats_update_end(&stats->syncp);

        /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
        xennet_tx_buf_gc(queue);

        if (!netfront_tx_slot_available(queue))
                netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));

        spin_unlock_irqrestore(&queue->tx_lock, flags);

        return NETDEV_TX_OK;

 drop:
        dev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

static int xennet_close(struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);
        unsigned int num_queues = dev->real_num_tx_queues;
        unsigned int i;
        struct netfront_queue *queue;
        netif_tx_stop_all_queues(np->netdev);
        for (i = 0; i < num_queues; ++i) {
                queue = &np->queues[i];
                napi_disable(&queue->napi);
        }
        return 0;
}

static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
                                grant_ref_t ref)
{
        int new = xennet_rxidx(queue->rx.req_prod_pvt);

        BUG_ON(queue->rx_skbs[new]);
        queue->rx_skbs[new] = skb;
        queue->grant_rx_ref[new] = ref;
        RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
        RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
        queue->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_queue *queue,
                             struct xen_netif_extra_info *extras,
                             RING_IDX rp)
{
        struct xen_netif_extra_info *extra;
        struct device *dev = &queue->info->netdev->dev;
        RING_IDX cons = queue->rx.rsp_cons;
        int err = 0;

        do {
                struct sk_buff *skb;
                grant_ref_t ref;

                if (unlikely(cons + 1 == rp)) {
                        if (net_ratelimit())
                                dev_warn(dev, "Missing extra info\n");
                        err = -EBADR;
                        break;
                }

                extra = (struct xen_netif_extra_info *)
                        RING_GET_RESPONSE(&queue->rx, ++cons);

                if (unlikely(!extra->type ||
                             extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        if (net_ratelimit())
                                dev_warn(dev, "Invalid extra type: %d\n",
                                        extra->type);
                        err = -EINVAL;
                } else {
                        memcpy(&extras[extra->type - 1], extra,
                               sizeof(*extra));
                }

                skb = xennet_get_rx_skb(queue, cons);
                ref = xennet_get_rx_ref(queue, cons);
                xennet_move_rx_slot(queue, skb, ref);
        } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

        queue->rx.rsp_cons = cons;
        return err;
}

static int xennet_get_responses(struct netfront_queue *queue,
                                struct netfront_rx_info *rinfo, RING_IDX rp,
                                struct sk_buff_head *list)
{
        struct xen_netif_rx_response *rx = &rinfo->rx;
        struct xen_netif_extra_info *extras = rinfo->extras;
        struct device *dev = &queue->info->netdev->dev;
        RING_IDX cons = queue->rx.rsp_cons;
        struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
        grant_ref_t ref = xennet_get_rx_ref(queue, cons);
        int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
        int slots = 1;
        int err = 0;
        unsigned long ret;

        if (rx->flags & XEN_NETRXF_extra_info) {
                err = xennet_get_extras(queue, extras, rp);
                cons = queue->rx.rsp_cons;
        }

        for (;;) {
                if (unlikely(rx->status < 0 ||
                             rx->offset + rx->status > PAGE_SIZE)) {
                        if (net_ratelimit())
                                dev_warn(dev, "rx->offset: %x, size: %u\n",
                                         rx->offset, rx->status);
                        xennet_move_rx_slot(queue, skb, ref);
                        err = -EINVAL;
                        goto next;
                }

                /*
                 * This definitely indicates a bug, either in this driver or in
                 * the backend driver. In future this should flag the bad
                 * situation to the system controller to reboot the backend.
                 */
                if (ref == GRANT_INVALID_REF) {
                        if (net_ratelimit())
                                dev_warn(dev, "Bad rx response id %d.\n",
                                         rx->id);
                        err = -EINVAL;
                        goto next;
                }

                ret = gnttab_end_foreign_access_ref(ref, 0);
                BUG_ON(!ret);

                gnttab_release_grant_reference(&queue->gref_rx_head, ref);

                __skb_queue_tail(list, skb);

next:
                if (!(rx->flags & XEN_NETRXF_more_data))
                        break;

                if (cons + slots == rp) {
                        if (net_ratelimit())
                                dev_warn(dev, "Need more slots\n");
                        err = -ENOENT;
                        break;
                }

                rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
                skb = xennet_get_rx_skb(queue, cons + slots);
                ref = xennet_get_rx_ref(queue, cons + slots);
                slots++;
        }

        if (unlikely(slots > max)) {
                if (net_ratelimit())
                        dev_warn(dev, "Too many slots\n");
                err = -E2BIG;
        }

        if (unlikely(err))
                queue->rx.rsp_cons = cons + slots;

        return err;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
                              struct xen_netif_extra_info *gso)
{
        if (!gso->u.gso.size) {
                if (net_ratelimit())
                        pr_warn("GSO size must not be zero\n");
                return -EINVAL;
        }

        if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
            gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
                if (net_ratelimit())
                        pr_warn("Bad GSO type %d\n", gso->u.gso.type);
                return -EINVAL;
        }

        skb_shinfo(skb)->gso_size = gso->u.gso.size;
        skb_shinfo(skb)->gso_type =
                (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
                SKB_GSO_TCPV4 :
                SKB_GSO_TCPV6;

        /* Header must be checked, and gso_segs computed. */
        skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
        skb_shinfo(skb)->gso_segs = 0;

        return 0;
}

static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                                  struct sk_buff *skb,
                                  struct sk_buff_head *list)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        RING_IDX cons = queue->rx.rsp_cons;
        struct sk_buff *nskb;

        while ((nskb = __skb_dequeue(list))) {
                struct xen_netif_rx_response *rx =
                        RING_GET_RESPONSE(&queue->rx, ++cons);
                skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

                if (shinfo->nr_frags == MAX_SKB_FRAGS) {
                        unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

                        BUG_ON(pull_to <= skb_headlen(skb));
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
                BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);

                skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
                                rx->offset, rx->status, PAGE_SIZE);

                skb_shinfo(nskb)->nr_frags = 0;
                kfree_skb(nskb);
        }

        return cons;
}

static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
        bool recalculate_partial_csum = false;

        /*
         * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
         * peers can fail to set NETRXF_csum_blank when sending a GSO
         * frame. In this case force the SKB to CHECKSUM_PARTIAL and
         * recalculate the partial checksum.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
                struct netfront_info *np = netdev_priv(dev);
                atomic_inc(&np->rx_gso_checksum_fixup);
                skb->ip_summed = CHECKSUM_PARTIAL;
                recalculate_partial_csum = true;
        }

        /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        return skb_checksum_setup(skb, recalculate_partial_csum);
}
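
/*
 * Added note: skb_checksum_setup() is the generic helper that parses the
 * IP/TCP/UDP headers and fills in csum_start/csum_offset for a
 * CHECKSUM_PARTIAL skb; when recalculate_partial_csum is true it also
 * recomputes the pseudo-header checksum that a buggy peer may have
 * omitted.
 */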

static int handle_incoming_queue(struct netfront_queue *queue,
                                 struct sk_buff_head *rxq)
{
        struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
        int packets_dropped = 0;
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(rxq)) != NULL) {
                int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

                if (pull_to > skb_headlen(skb))
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));

                /* Ethernet work: delayed until here as it peeks at the header. */
                skb->protocol = eth_type_trans(skb, queue->info->netdev);
                skb_reset_network_header(skb);

                if (checksum_setup(queue->info->netdev, skb)) {
                        kfree_skb(skb);
                        packets_dropped++;
                        queue->info->netdev->stats.rx_errors++;
                        continue;
                }

                u64_stats_update_begin(&stats->syncp);
                stats->rx_packets++;
                stats->rx_bytes += skb->len;
                u64_stats_update_end(&stats->syncp);

                /* Pass it up. */
                napi_gro_receive(&queue->napi, skb);
        }

        return packets_dropped;
}

static int xennet_poll(struct napi_struct *napi, int budget)
{
        struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
        struct net_device *dev = queue->info->netdev;
        struct sk_buff *skb;
        struct netfront_rx_info rinfo;
        struct xen_netif_rx_response *rx = &rinfo.rx;
        struct xen_netif_extra_info *extras = rinfo.extras;
        RING_IDX i, rp;
        int work_done;
        struct sk_buff_head rxq;
        struct sk_buff_head errq;
        struct sk_buff_head tmpq;
        int err;

        spin_lock(&queue->rx_lock);

        skb_queue_head_init(&rxq);
        skb_queue_head_init(&errq);
        skb_queue_head_init(&tmpq);

        rp = queue->rx.sring->rsp_prod;
        rmb(); /* Ensure we see queued responses up to 'rp'. */

        i = queue->rx.rsp_cons;
        work_done = 0;
        while ((i != rp) && (work_done < budget)) {
                memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
                memset(extras, 0, sizeof(rinfo.extras));

                err = xennet_get_responses(queue, &rinfo, rp, &tmpq);

                if (unlikely(err)) {
err:
                        while ((skb = __skb_dequeue(&tmpq)))
                                __skb_queue_tail(&errq, skb);
                        dev->stats.rx_errors++;
                        i = queue->rx.rsp_cons;
                        continue;
                }

                skb = __skb_dequeue(&tmpq);

                if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
                        struct xen_netif_extra_info *gso;
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

                        if (unlikely(xennet_set_skb_gso(skb, gso))) {
                                __skb_queue_head(&tmpq, skb);
                                queue->rx.rsp_cons += skb_queue_len(&tmpq);
                                goto err;
                        }
                }

                NETFRONT_SKB_CB(skb)->pull_to = rx->status;
                if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
                        NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

                skb_shinfo(skb)->frags[0].page_offset = rx->offset;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
                skb->data_len = rx->status;
                skb->len += rx->status;

                i = xennet_fill_frags(queue, skb, &tmpq);

                if (rx->flags & XEN_NETRXF_csum_blank)
                        skb->ip_summed = CHECKSUM_PARTIAL;
                else if (rx->flags & XEN_NETRXF_data_validated)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                __skb_queue_tail(&rxq, skb);

                queue->rx.rsp_cons = ++i;
                work_done++;
        }

        __skb_queue_purge(&errq);

        work_done -= handle_incoming_queue(queue, &rxq);

        xennet_alloc_rx_buffers(queue);

        if (work_done < budget) {
                int more_to_do = 0;

                napi_complete(napi);

                RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
                if (more_to_do)
                        napi_schedule(napi);
        }

        spin_unlock(&queue->rx_lock);

        return work_done;
}

static int xennet_change_mtu(struct net_device *dev, int mtu)
{
        int max = xennet_can_sg(dev) ?
                XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}
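
/*
 * Added note: XEN_NETIF_MAX_TX_SIZE is 0xFFFF, so with SG enabled the MTU
 * may be raised to 65535 - MAX_TCP_HEADER; without SG it stays capped at
 * the standard ETH_DATA_LEN of 1500 bytes.
 */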

static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
                                                    struct rtnl_link_stats64 *tot)
{
        struct netfront_info *np = netdev_priv(dev);
        int cpu;

        for_each_possible_cpu(cpu) {
                struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
                u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&stats->syncp);

                        rx_packets = stats->rx_packets;
                        tx_packets = stats->tx_packets;
                        rx_bytes = stats->rx_bytes;
                        tx_bytes = stats->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));

                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
                tot->rx_bytes   += rx_bytes;
                tot->tx_bytes   += tx_bytes;
        }

        tot->rx_errors  = dev->stats.rx_errors;
        tot->tx_dropped = dev->stats.tx_dropped;

        return tot;
}

static void xennet_release_tx_bufs(struct netfront_queue *queue)
{
        struct sk_buff *skb;
        int i;

        for (i = 0; i < NET_TX_RING_SIZE; i++) {
                /* Skip over entries which are actually freelist references */
                if (skb_entry_is_link(&queue->tx_skbs[i]))
                        continue;

                skb = queue->tx_skbs[i].skb;
                get_page(queue->grant_tx_page[i]);
                gnttab_end_foreign_access(queue->grant_tx_ref[i],
                                          GNTMAP_readonly,
                                          (unsigned long)page_address(queue->grant_tx_page[i]));
                queue->grant_tx_page[i] = NULL;
                queue->grant_tx_ref[i] = GRANT_INVALID_REF;
                add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
                dev_kfree_skb_irq(skb);
        }
}

static void xennet_release_rx_bufs(struct netfront_queue *queue)
{
        int id, ref;

        spin_lock_bh(&queue->rx_lock);

        for (id = 0; id < NET_RX_RING_SIZE; id++) {
                struct sk_buff *skb;
                struct page *page;

                skb = queue->rx_skbs[id];
                if (!skb)
                        continue;

                ref = queue->grant_rx_ref[id];
                if (ref == GRANT_INVALID_REF)
                        continue;

                page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

                /* gnttab_end_foreign_access() needs a page ref until
                 * foreign access is ended (which may be deferred).
                 */
                get_page(page);
                gnttab_end_foreign_access(ref, 0,
                                          (unsigned long)page_address(page));
                queue->grant_rx_ref[id] = GRANT_INVALID_REF;

                kfree_skb(skb);
        }

        spin_unlock_bh(&queue->rx_lock);
}

static netdev_features_t xennet_fix_features(struct net_device *dev,
        netdev_features_t features)
{
        struct netfront_info *np = netdev_priv(dev);
        int val;

        if (features & NETIF_F_SG) {
                if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
                                 "%d", &val) < 0)
                        val = 0;

                if (!val)
                        features &= ~NETIF_F_SG;
        }

        if (features & NETIF_F_IPV6_CSUM) {
                if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
                                 "feature-ipv6-csum-offload", "%d", &val) < 0)
                        val = 0;

                if (!val)
                        features &= ~NETIF_F_IPV6_CSUM;
        }

        if (features & NETIF_F_TSO) {
                if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
                                 "feature-gso-tcpv4", "%d", &val) < 0)
                        val = 0;

                if (!val)
                        features &= ~NETIF_F_TSO;
        }

        if (features & NETIF_F_TSO6) {
                if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
                                 "feature-gso-tcpv6", "%d", &val) < 0)
                        val = 0;

                if (!val)
                        features &= ~NETIF_F_TSO6;
        }

        return features;
}

static int xennet_set_features(struct net_device *dev,
        netdev_features_t features)
{
        if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
                netdev_info(dev, "Reducing MTU because no SG offload\n");
                dev->mtu = ETH_DATA_LEN;
        }

        return 0;
}

static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
        struct netfront_queue *queue = dev_id;
        unsigned long flags;

        spin_lock_irqsave(&queue->tx_lock, flags);
        xennet_tx_buf_gc(queue);
        spin_unlock_irqrestore(&queue->tx_lock, flags);

        return IRQ_HANDLED;
}

static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
        struct netfront_queue *queue = dev_id;
        struct net_device *dev = queue->info->netdev;

        if (likely(netif_carrier_ok(dev) &&
                   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
                napi_schedule(&queue->napi);

        return IRQ_HANDLED;
}

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
        xennet_tx_interrupt(irq, dev_id);
        xennet_rx_interrupt(irq, dev_id);
        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
        /* Poll each queue */
        struct netfront_info *info = netdev_priv(dev);
        unsigned int num_queues = dev->real_num_tx_queues;
        unsigned int i;
        for (i = 0; i < num_queues; ++i)
                xennet_interrupt(0, &info->queues[i]);
}
#endif

static const struct net_device_ops xennet_netdev_ops = {
        .ndo_open            = xennet_open,
        .ndo_stop            = xennet_close,
        .ndo_start_xmit      = xennet_start_xmit,
        .ndo_change_mtu      = xennet_change_mtu,
        .ndo_get_stats64     = xennet_get_stats64,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_fix_features    = xennet_fix_features,
        .ndo_set_features    = xennet_set_features,
        .ndo_select_queue    = xennet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = xennet_poll_controller,
#endif
};

static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
        int err;
        struct net_device *netdev;
        struct netfront_info *np;

        netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
        if (!netdev)
                return ERR_PTR(-ENOMEM);

        np                   = netdev_priv(netdev);
        np->xbdev            = dev;

        /* No need to use rtnl_lock() before the call below as it
         * happens before register_netdev().
         */
        netif_set_real_num_tx_queues(netdev, 0);
        np->queues = NULL;

        err = -ENOMEM;
        np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
        if (np->stats == NULL)
                goto exit;

        netdev->netdev_ops      = &xennet_netdev_ops;

        netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
                                  NETIF_F_GSO_ROBUST;
        netdev->hw_features     = NETIF_F_SG |
                                  NETIF_F_IPV6_CSUM |
                                  NETIF_F_TSO | NETIF_F_TSO6;

        /*
         * Assume that all hw features are available for now. This set
         * will be adjusted by the call to netdev_update_features() in
         * xennet_connect() which is the earliest point where we can
         * negotiate with the backend regarding supported features.
         */
        netdev->features |= netdev->hw_features;

        netdev->ethtool_ops = &xennet_ethtool_ops;
        SET_NETDEV_DEV(netdev, &dev->dev);

        netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);

        np->netdev = netdev;

        netif_carrier_off(netdev);

        return netdev;

 exit:
        free_netdev(netdev);
        return ERR_PTR(err);
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int netfront_probe(struct xenbus_device *dev,
                          const struct xenbus_device_id *id)
{
        int err;
        struct net_device *netdev;
        struct netfront_info *info;

        netdev = xennet_create_dev(dev);
        if (IS_ERR(netdev)) {
                err = PTR_ERR(netdev);
                xenbus_dev_fatal(dev, err, "creating netdev");
                return err;
        }

        info = netdev_priv(netdev);
        dev_set_drvdata(&dev->dev, info);

        err = register_netdev(info->netdev);
        if (err) {
                pr_warn("%s: register_netdev err=%d\n", __func__, err);
                goto fail;
        }

        err = xennet_sysfs_addif(info->netdev);
        if (err) {
                unregister_netdev(info->netdev);
                pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
                goto fail;
        }

        return 0;

 fail:
        free_netdev(netdev);
        dev_set_drvdata(&dev->dev, NULL);
        return err;
}

static void xennet_end_access(int ref, void *page)
{
        /* This frees the page as a side-effect */
        if (ref != GRANT_INVALID_REF)
                gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}

static void xennet_disconnect_backend(struct netfront_info *info)
{
        unsigned int i = 0;
        unsigned int num_queues = info->netdev->real_num_tx_queues;

        netif_carrier_off(info->netdev);

        for (i = 0; i < num_queues; ++i) {
                struct netfront_queue *queue = &info->queues[i];

                if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
                        unbind_from_irqhandler(queue->tx_irq, queue);
                if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
                        unbind_from_irqhandler(queue->tx_irq, queue);
                        unbind_from_irqhandler(queue->rx_irq, queue);
                }
                queue->tx_evtchn = queue->rx_evtchn = 0;
                queue->tx_irq = queue->rx_irq = 0;

                napi_synchronize(&queue->napi);

                xennet_release_tx_bufs(queue);
                xennet_release_rx_bufs(queue);
                gnttab_free_grant_references(queue->gref_tx_head);
                gnttab_free_grant_references(queue->gref_rx_head);

                /* End access and free the pages */
                xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
                xennet_end_access(queue->rx_ring_ref, queue->rx.sring);

                queue->tx_ring_ref = GRANT_INVALID_REF;
                queue->rx_ring_ref = GRANT_INVALID_REF;
                queue->tx.sring = NULL;
                queue->rx.sring = NULL;
        }
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
        struct netfront_info *info = dev_get_drvdata(&dev->dev);

        dev_dbg(&dev->dev, "%s\n", dev->nodename);

        xennet_disconnect_backend(info);
        return 0;
}

static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
        char *s, *e, *macstr;
        int i;

        macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
        if (IS_ERR(macstr))
                return PTR_ERR(macstr);

        for (i = 0; i < ETH_ALEN; i++) {
                mac[i] = simple_strtoul(s, &e, 16);
                if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
                        kfree(macstr);
                        return -ENOENT;
                }
                s = e+1;
        }

        kfree(macstr);
        return 0;
}
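
/*
 * Added note: the xenstore "mac" node is a colon-separated string such as
 * "00:16:3e:12:34:56" (00:16:3e is the Xen OUI); the loop above converts
 * each hex octet with simple_strtoul() and insists on ':' separators and
 * a trailing NUL.
 */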
1454
1455 static int setup_netfront_single(struct netfront_queue *queue)
1456 {
1457         int err;
1458
1459         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1460         if (err < 0)
1461                 goto fail;
1462
1463         err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1464                                         xennet_interrupt,
1465                                         0, queue->info->netdev->name, queue);
1466         if (err < 0)
1467                 goto bind_fail;
1468         queue->rx_evtchn = queue->tx_evtchn;
1469         queue->rx_irq = queue->tx_irq = err;
1470
1471         return 0;
1472
1473 bind_fail:
1474         xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1475         queue->tx_evtchn = 0;
1476 fail:
1477         return err;
1478 }
1479
1480 static int setup_netfront_split(struct netfront_queue *queue)
1481 {
1482         int err;
1483
1484         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1485         if (err < 0)
1486                 goto fail;
1487         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1488         if (err < 0)
1489                 goto alloc_rx_evtchn_fail;
1490
1491         snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1492                  "%s-tx", queue->name);
1493         err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1494                                         xennet_tx_interrupt,
1495                                         0, queue->tx_irq_name, queue);
1496         if (err < 0)
1497                 goto bind_tx_fail;
1498         queue->tx_irq = err;
1499
1500         snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1501                  "%s-rx", queue->name);
1502         err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
1503                                         xennet_rx_interrupt,
1504                                         0, queue->rx_irq_name, queue);
1505         if (err < 0)
1506                 goto bind_rx_fail;
1507         queue->rx_irq = err;
1508
1509         return 0;
1510
1511 bind_rx_fail:
1512         unbind_from_irqhandler(queue->tx_irq, queue);
1513         queue->tx_irq = 0;
1514 bind_tx_fail:
1515         xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1516         queue->rx_evtchn = 0;
1517 alloc_rx_evtchn_fail:
1518         xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1519         queue->tx_evtchn = 0;
1520 fail:
1521         return err;
1522 }
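
/*
 * With a single event channel, tx and rx completions share one IRQ
 * registered under the netdev's name; with split channels each queue gets
 * two IRQs named after the queue, so /proc/interrupts would show entries
 * such as (names illustrative, assuming netdev "eth0"):
 *
 *     eth0-q0-tx  eth0-q0-rx  eth0-q1-tx  eth0-q1-rx
 */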
1523
1524 static int setup_netfront(struct xenbus_device *dev,
1525                         struct netfront_queue *queue, unsigned int feature_split_evtchn)
1526 {
1527         struct xen_netif_tx_sring *txs;
1528         struct xen_netif_rx_sring *rxs;
1529         int err;
1530
1531         queue->tx_ring_ref = GRANT_INVALID_REF;
1532         queue->rx_ring_ref = GRANT_INVALID_REF;
1533         queue->rx.sring = NULL;
1534         queue->tx.sring = NULL;
1535
1536         txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1537         if (!txs) {
1538                 err = -ENOMEM;
1539                 xenbus_dev_fatal(dev, err, "allocating tx ring page");
1540                 goto fail;
1541         }
1542         SHARED_RING_INIT(txs);
1543         FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE);
1544
1545         err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1546         if (err < 0)
1547                 goto grant_tx_ring_fail;
1548         queue->tx_ring_ref = err;
1549
1550         rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1551         if (!rxs) {
1552                 err = -ENOMEM;
1553                 xenbus_dev_fatal(dev, err, "allocating rx ring page");
1554                 goto alloc_rx_ring_fail;
1555         }
1556         SHARED_RING_INIT(rxs);
1557         FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
1558
1559         err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1560         if (err < 0)
1561                 goto grant_rx_ring_fail;
1562         queue->rx_ring_ref = err;
1563
1564         if (feature_split_evtchn)
1565                 err = setup_netfront_split(queue);
1566         /* Set up a single event channel if
1567          *  a) feature-split-event-channels == 0, or
1568          *  b) feature-split-event-channels == 1 but the split setup failed
1569          */
1570         if (!feature_split_evtchn || err)
1571                 err = setup_netfront_single(queue);
1572
1573         if (err)
1574                 goto alloc_evtchn_fail;
1575
1576         return 0;
1577
1578         /* If we fail to set up netfront, it is safe to just revoke access
1579          * to the granted pages because the backend is not accessing them yet.
1580          */
1581 alloc_evtchn_fail:
1582         gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
1583 grant_rx_ring_fail:
1584         free_page((unsigned long)rxs);
1585 alloc_rx_ring_fail:
1586         gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
1587 grant_tx_ring_fail:
1588         free_page((unsigned long)txs);
1589 fail:
1590         return err;
1591 }
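
/*
 * The unwind order above pairs each failure label with exactly the
 * resources acquired before the failing step, in reverse order:
 *
 *     get_zeroed_page(txs)   <->  free_page(txs)                 (grant_tx_ring_fail)
 *     xenbus_grant_ring(txs) <->  gnttab_end_foreign_access_ref  (alloc_rx_ring_fail)
 *     get_zeroed_page(rxs)   <->  free_page(rxs)                 (grant_rx_ring_fail)
 *     xenbus_grant_ring(rxs) <->  gnttab_end_foreign_access_ref  (alloc_evtchn_fail)
 */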
1592
1593 /* Queue-specific initialisation
1594  * This used to be done in xennet_create_dev() but must now
1595  * be run per-queue.
1596  */
1597 static int xennet_init_queue(struct netfront_queue *queue)
1598 {
1599         unsigned short i;
1600         int err = 0;
1601
1602         spin_lock_init(&queue->tx_lock);
1603         spin_lock_init(&queue->rx_lock);
1604
1605         init_timer(&queue->rx_refill_timer);
1606         queue->rx_refill_timer.data = (unsigned long)queue;
1607         queue->rx_refill_timer.function = rx_refill_timeout;
1608
1609         snprintf(queue->name, sizeof(queue->name), "%s-q%u",
1610                  queue->info->netdev->name, queue->id);
1611
1612         /* Initialise tx_skbs as a free chain containing every entry. */
1613         queue->tx_skb_freelist = 0;
1614         for (i = 0; i < NET_TX_RING_SIZE; i++) {
1615                 skb_entry_set_link(&queue->tx_skbs[i], i+1);
1616                 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1617                 queue->grant_tx_page[i] = NULL;
1618         }
1619
1620         /* Clear out rx_skbs */
1621         for (i = 0; i < NET_RX_RING_SIZE; i++) {
1622                 queue->rx_skbs[i] = NULL;
1623                 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
1624         }
1625
1626         /* A grant for every tx ring slot */
1627         if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
1628                                           &queue->gref_tx_head) < 0) {
1629                 pr_alert("can't alloc tx grant refs\n");
1630                 err = -ENOMEM;
1631                 goto exit;
1632         }
1633
1634         /* A grant for every rx ring slot */
1635         if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
1636                                           &queue->gref_rx_head) < 0) {
1637                 pr_alert("can't alloc rx grant refs\n");
1638                 err = -ENOMEM;
1639                 goto exit_free_tx;
1640         }
1641
1642         return 0;
1643
1644  exit_free_tx:
1645         gnttab_free_grant_references(queue->gref_tx_head);
1646  exit:
1647         return err;
1648 }
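
/*
 * A sketch of the tx free list built above: tx_skb_freelist starts at
 * slot 0 and every tx_skbs[i] links to i + 1, so the free slots form the
 * chain 0 -> 1 -> ... -> NET_TX_RING_SIZE - 1; allocation pops from the
 * head and completed slots are pushed back onto it the same way.
 */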
1649
1650 static int write_queue_xenstore_keys(struct netfront_queue *queue,
1651                            struct xenbus_transaction *xbt, int write_hierarchical)
1652 {
1653         /* Write the queue-specific keys into XenStore in the traditional
1654          * flat way for a single queue, or under per-queue subkeys for
1655          * multiple queues.
1656          */
1657         struct xenbus_device *dev = queue->info->xbdev;
1658         int err;
1659         const char *message;
1660         char *path;
1661         size_t pathsize;
1662
1663         /* Choose the correct place to write the keys */
1664         if (write_hierarchical) {
1665                 pathsize = strlen(dev->nodename) + 10;
1666                 path = kzalloc(pathsize, GFP_KERNEL);
1667                 if (!path) {
1668                         err = -ENOMEM;
1669                         message = "out of memory while writing ring references";
1670                         goto error;
1671                 }
1672                 snprintf(path, pathsize, "%s/queue-%u",
1673                                 dev->nodename, queue->id);
1674         } else {
1675                 path = (char *)dev->nodename;
1676         }
1677
1678         /* Write ring references */
1679         err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
1680                         queue->tx_ring_ref);
1681         if (err) {
1682                 message = "writing tx-ring-ref";
1683                 goto error;
1684         }
1685
1686         err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
1687                         queue->rx_ring_ref);
1688         if (err) {
1689                 message = "writing rx-ring-ref";
1690                 goto error;
1691         }
1692
1693         /* Write the event channels, taking into account both the shared
1694          * and the split event channel scenarios.
1695          */
1696         if (queue->tx_evtchn == queue->rx_evtchn) {
1697                 /* Shared event channel */
1698                 err = xenbus_printf(*xbt, path,
1699                                 "event-channel", "%u", queue->tx_evtchn);
1700                 if (err) {
1701                         message = "writing event-channel";
1702                         goto error;
1703                 }
1704         } else {
1705                 /* Split event channels */
1706                 err = xenbus_printf(*xbt, path,
1707                                 "event-channel-tx", "%u", queue->tx_evtchn);
1708                 if (err) {
1709                         message = "writing event-channel-tx";
1710                         goto error;
1711                 }
1712
1713                 err = xenbus_printf(*xbt, path,
1714                                 "event-channel-rx", "%u", queue->rx_evtchn);
1715                 if (err) {
1716                         message = "writing event-channel-rx";
1717                         goto error;
1718                 }
1719         }
1720
1721         if (write_hierarchical)
1722                 kfree(path);
1723         return 0;
1724
1725 error:
1726         if (write_hierarchical)
1727                 kfree(path);
1728         xenbus_dev_fatal(dev, err, "%s", message);
1729         return err;
1730 }
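
/*
 * A sketch of the resulting XenStore layout (<node> being dev->nodename;
 * key names taken from the writes above):
 *
 *     flat, one queue:          hierarchical, queue N:
 *       <node>/tx-ring-ref        <node>/queue-N/tx-ring-ref
 *       <node>/rx-ring-ref        <node>/queue-N/rx-ring-ref
 *       <node>/event-channel      <node>/queue-N/event-channel-tx
 *         (or -tx/-rx if split)   <node>/queue-N/event-channel-rx
 *                                   (or a single event-channel if shared)
 */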
1731
1732 static void xennet_destroy_queues(struct netfront_info *info)
1733 {
1734         unsigned int i;
1735
1736         rtnl_lock();
1737
1738         for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
1739                 struct netfront_queue *queue = &info->queues[i];
1740
1741                 if (netif_running(info->netdev))
1742                         napi_disable(&queue->napi);
1743                 netif_napi_del(&queue->napi);
1744         }
1745
1746         rtnl_unlock();
1747
1748         kfree(info->queues);
1749         info->queues = NULL;
1750 }
1751
1752 static int xennet_create_queues(struct netfront_info *info,
1753                                 unsigned int num_queues)
1754 {
1755         unsigned int i;
1756         int ret;
1757
1758         info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
1759                                GFP_KERNEL);
1760         if (!info->queues)
1761                 return -ENOMEM;
1762
1763         rtnl_lock();
1764
1765         for (i = 0; i < num_queues; i++) {
1766                 struct netfront_queue *queue = &info->queues[i];
1767
1768                 queue->id = i;
1769                 queue->info = info;
1770
1771                 ret = xennet_init_queue(queue);
1772                 if (ret < 0) {
1773                         dev_warn(&info->netdev->dev,
1774                          "only created %u queues\n", i);
1775                         num_queues = i;
1776                         break;
1777                 }
1778
1779                 netif_napi_add(queue->info->netdev, &queue->napi,
1780                                xennet_poll, 64);
1781                 if (netif_running(info->netdev))
1782                         napi_enable(&queue->napi);
1783         }
1784
1785         netif_set_real_num_tx_queues(info->netdev, num_queues);
1786
1787         rtnl_unlock();
1788
1789         if (num_queues == 0) {
1790                 dev_err(&info->netdev->dev, "no queues\n");
1791                 return -EINVAL;
1792         }
1793         return 0;
1794 }
1795
1796 /* Common code used both when first setting up and when resuming. */
1797 static int talk_to_netback(struct xenbus_device *dev,
1798                            struct netfront_info *info)
1799 {
1800         const char *message;
1801         struct xenbus_transaction xbt;
1802         int err;
1803         unsigned int feature_split_evtchn;
1804         unsigned int i = 0;
1805         unsigned int max_queues = 0;
1806         struct netfront_queue *queue = NULL;
1807         unsigned int num_queues = 1;
1808
1809         info->netdev->irq = 0;
1810
1811         /* Check whether the backend supports multiple queues */
1812         err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1813                            "multi-queue-max-queues", "%u", &max_queues);
1814         if (err < 0)
1815                 max_queues = 1;
1816         num_queues = min(max_queues, xennet_max_queues);
1817
1818         /* Check feature-split-event-channels */
1819         err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1820                            "feature-split-event-channels", "%u",
1821                            &feature_split_evtchn);
1822         if (err < 0)
1823                 feature_split_evtchn = 0;
1824
1825         /* Read the MAC address. */
1826         err = xen_net_read_mac(dev, info->netdev->dev_addr);
1827         if (err) {
1828                 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1829                 goto out;
1830         }
1831
1832         if (info->queues)
1833                 xennet_destroy_queues(info);
1834
1835         err = xennet_create_queues(info, num_queues);
1836         if (err < 0)
1837                 goto destroy_ring;
1838
1839         /* Create shared rings and allocate event channels -- one set per queue */
1840         for (i = 0; i < num_queues; ++i) {
1841                 queue = &info->queues[i];
1842                 err = setup_netfront(dev, queue, feature_split_evtchn);
1843                 if (err) {
1844                         /* setup_netfront() will tidy up the current
1845                          * queue on error, but we still need to clean
1846                          * up the queues already set up.
1847                          */
1848                         if (i > 0) {
1849                                 rtnl_lock();
1850                                 netif_set_real_num_tx_queues(info->netdev, i);
1851                                 rtnl_unlock();
1852                                 goto destroy_ring;
1853                         } else {
1854                                 goto out;
1855                         }
1856                 }
1857         }
1858
1859 again:
1860         err = xenbus_transaction_start(&xbt);
1861         if (err) {
1862                 xenbus_dev_fatal(dev, err, "starting transaction");
1863                 goto destroy_ring;
1864         }
1865
1866         if (num_queues == 1) {
1867                 err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
1868                 if (err)
1869                         goto abort_transaction_no_dev_fatal;
1870         } else {
1871                 /* Write the number of queues */
1872                 err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues",
1873                                     "%u", num_queues);
1874                 if (err) {
1875                         message = "writing multi-queue-num-queues";
1876                         goto abort_transaction_no_dev_fatal;
1877                 }
1878
1879                 /* Write the keys for each queue */
1880                 for (i = 0; i < num_queues; ++i) {
1881                         queue = &info->queues[i];
1882                         err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
1883                         if (err)
1884                                 goto abort_transaction_no_dev_fatal;
1885                 }
1886         }
1887
1888         /* The remaining keys are not queue-specific */
1889         err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1890                             1);
1891         if (err) {
1892                 message = "writing request-rx-copy";
1893                 goto abort_transaction;
1894         }
1895
1896         err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1897         if (err) {
1898                 message = "writing feature-rx-notify";
1899                 goto abort_transaction;
1900         }
1901
1902         err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1903         if (err) {
1904                 message = "writing feature-sg";
1905                 goto abort_transaction;
1906         }
1907
1908         err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1909         if (err) {
1910                 message = "writing feature-gso-tcpv4";
1911                 goto abort_transaction;
1912         }
1913
1914         err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
1915         if (err) {
1916                 message = "writing feature-gso-tcpv6";
1917                 goto abort_transaction;
1918         }
1919
1920         err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
1921                            "1");
1922         if (err) {
1923                 message = "writing feature-ipv6-csum-offload";
1924                 goto abort_transaction;
1925         }
1926
1927         err = xenbus_transaction_end(xbt, 0);
1928         if (err) {
1929                 if (err == -EAGAIN)
1930                         goto again;
1931                 xenbus_dev_fatal(dev, err, "completing transaction");
1932                 goto destroy_ring;
1933         }
1934
1935         return 0;
1936
1937  abort_transaction:
1938         xenbus_dev_fatal(dev, err, "%s", message);
1939 abort_transaction_no_dev_fatal:
1940         xenbus_transaction_end(xbt, 1);
1941  destroy_ring:
1942         xennet_disconnect_backend(info);
1943         kfree(info->queues);
1944         info->queues = NULL;
1945         rtnl_lock();
1946         netif_set_real_num_tx_queues(info->netdev, 0);
1947         rtnl_unlock();
1948  out:
1949         return err;
1950 }
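
/*
 * xenbus_transaction_end() reports -EAGAIN when the transaction collided
 * with a concurrent XenStore update; the "goto again" above then replays
 * every write inside a fresh transaction, the usual xenbus retry idiom.
 */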
1951
1952 static int xennet_connect(struct net_device *dev)
1953 {
1954         struct netfront_info *np = netdev_priv(dev);
1955         unsigned int num_queues = 0;
1956         int err;
1957         unsigned int feature_rx_copy;
1958         unsigned int j = 0;
1959         struct netfront_queue *queue = NULL;
1960
1961         err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1962                            "feature-rx-copy", "%u", &feature_rx_copy);
1963         if (err != 1)
1964                 feature_rx_copy = 0;
1965
1966         if (!feature_rx_copy) {
1967                 dev_info(&dev->dev,
1968                          "backend does not support copying receive path\n");
1969                 return -ENODEV;
1970         }
1971
1972         err = talk_to_netback(np->xbdev, np);
1973         if (err)
1974                 return err;
1975
1976         /* talk_to_netback() sets the correct number of queues */
1977         num_queues = dev->real_num_tx_queues;
1978
1979         rtnl_lock();
1980         netdev_update_features(dev);
1981         rtnl_unlock();
1982
1983         /*
1984          * All public and private state should now be sane.  Get
1985          * ready to start sending and receiving packets and give the driver
1986          * domain a kick because we've probably just requeued some
1987          * packets.
1988          */
1989         netif_carrier_on(np->netdev);
1990         for (j = 0; j < num_queues; ++j) {
1991                 queue = &np->queues[j];
1992
1993                 notify_remote_via_irq(queue->tx_irq);
1994                 if (queue->tx_irq != queue->rx_irq)
1995                         notify_remote_via_irq(queue->rx_irq);
1996
1997                 spin_lock_irq(&queue->tx_lock);
1998                 xennet_tx_buf_gc(queue);
1999                 spin_unlock_irq(&queue->tx_lock);
2000
2001                 spin_lock_bh(&queue->rx_lock);
2002                 xennet_alloc_rx_buffers(queue);
2003                 spin_unlock_bh(&queue->rx_lock);
2004         }
2005
2006         return 0;
2007 }
2008
2009 /*
2010  * Callback received when the backend's state changes.
2011  */
2012 static void netback_changed(struct xenbus_device *dev,
2013                             enum xenbus_state backend_state)
2014 {
2015         struct netfront_info *np = dev_get_drvdata(&dev->dev);
2016         struct net_device *netdev = np->netdev;
2017
2018         dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2019
2020         switch (backend_state) {
2021         case XenbusStateInitialising:
2022         case XenbusStateInitialised:
2023         case XenbusStateReconfiguring:
2024         case XenbusStateReconfigured:
2025         case XenbusStateUnknown:
2026                 break;
2027
2028         case XenbusStateInitWait:
2029                 if (dev->state != XenbusStateInitialising)
2030                         break;
2031                 if (xennet_connect(netdev) != 0)
2032                         break;
2033                 xenbus_switch_state(dev, XenbusStateConnected);
2034                 break;
2035
2036         case XenbusStateConnected:
2037                 netdev_notify_peers(netdev);
2038                 break;
2039
2040         case XenbusStateClosed:
2041                 if (dev->state == XenbusStateClosed)
2042                         break;
2043                 /* Missed the backend's CLOSING state -- fallthrough */
2044         case XenbusStateClosing:
2045                 xenbus_frontend_closed(dev);
2046                 break;
2047         }
2048 }
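
/*
 * A sketch of the handshake driven by the switch above (backend state on
 * the left, frontend action on the right):
 *
 *     InitWait   ->  xennet_connect(), then switch to Connected
 *     Connected  ->  netdev_notify_peers() (gratuitous ARP)
 *     Closing    ->  xenbus_frontend_closed()
 *     Closed     ->  xenbus_frontend_closed(), unless already closed
 */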
2049
2050 static const struct xennet_stat {
2051         char name[ETH_GSTRING_LEN];
2052         u16 offset;
2053 } xennet_stats[] = {
2054         {
2055                 "rx_gso_checksum_fixup",
2056                 offsetof(struct netfront_info, rx_gso_checksum_fixup)
2057         },
2058 };
2059
2060 static int xennet_get_sset_count(struct net_device *dev, int string_set)
2061 {
2062         switch (string_set) {
2063         case ETH_SS_STATS:
2064                 return ARRAY_SIZE(xennet_stats);
2065         default:
2066                 return -EINVAL;
2067         }
2068 }
2069
2070 static void xennet_get_ethtool_stats(struct net_device *dev,
2071                                      struct ethtool_stats *stats, u64 *data)
2072 {
2073         void *np = netdev_priv(dev);
2074         int i;
2075
2076         for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2077                 data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2078 }
2079
2080 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2081 {
2082         int i;
2083
2084         switch (stringset) {
2085         case ETH_SS_STATS:
2086                 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2087                         memcpy(data + i * ETH_GSTRING_LEN,
2088                                xennet_stats[i].name, ETH_GSTRING_LEN);
2089                 break;
2090         }
2091 }
2092
2093 static const struct ethtool_ops xennet_ethtool_ops = {
2095         .get_link = ethtool_op_get_link,
2096
2097         .get_sset_count = xennet_get_sset_count,
2098         .get_ethtool_stats = xennet_get_ethtool_stats,
2099         .get_strings = xennet_get_strings,
2100 };
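
/*
 * Usage sketch: the single private counter is exposed through the
 * standard ethtool statistics interface, e.g. from inside the guest:
 *
 *     # ethtool -S eth0
 *     NIC statistics:
 *          rx_gso_checksum_fixup: 0
 */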
2101
2102 #ifdef CONFIG_SYSFS
2103 static ssize_t show_rxbuf(struct device *dev,
2104                           struct device_attribute *attr, char *buf)
2105 {
2106         return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2107 }
2108
2109 static ssize_t store_rxbuf(struct device *dev,
2110                            struct device_attribute *attr,
2111                            const char *buf, size_t len)
2112 {
2113         char *endp;
2114         unsigned long target;
2115
2116         if (!capable(CAP_NET_ADMIN))
2117                 return -EPERM;
2118
2119         target = simple_strtoul(buf, &endp, 0);
2120         if (endp == buf)
2121                 return -EBADMSG;
2122
2123         /* rxbuf_min and rxbuf_max are no longer configurable. */
2124
2125         return len;
2126 }
2127
2128 static struct device_attribute xennet_attrs[] = {
2129         __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
2130         __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf),
2131         __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL),
2132 };
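
/*
 * rxbuf_min and rxbuf_max remain writable purely for backwards
 * compatibility: store_rxbuf() validates the number and then discards it,
 * while all three attributes report the fixed ring size.
 */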
2133
2134 static int xennet_sysfs_addif(struct net_device *netdev)
2135 {
2136         int i;
2137         int err;
2138
2139         for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
2140                 err = device_create_file(&netdev->dev,
2141                                            &xennet_attrs[i]);
2142                 if (err)
2143                         goto fail;
2144         }
2145         return 0;
2146
2147  fail:
2148         while (--i >= 0)
2149                 device_remove_file(&netdev->dev, &xennet_attrs[i]);
2150         return err;
2151 }
2152
2153 static void xennet_sysfs_delif(struct net_device *netdev)
2154 {
2155         int i;
2156
2157         for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
2158                 device_remove_file(&netdev->dev, &xennet_attrs[i]);
2159 }
2160
2161 #endif /* CONFIG_SYSFS */
2162
2163 static int xennet_remove(struct xenbus_device *dev)
2164 {
2165         struct netfront_info *info = dev_get_drvdata(&dev->dev);
2166         unsigned int num_queues = info->netdev->real_num_tx_queues;
2167         struct netfront_queue *queue = NULL;
2168         unsigned int i = 0;
2169
2170         dev_dbg(&dev->dev, "%s\n", dev->nodename);
2171
2172         xennet_disconnect_backend(info);
2173
2174         xennet_sysfs_delif(info->netdev);
2175
2176         unregister_netdev(info->netdev);
2177
2178         for (i = 0; i < num_queues; ++i) {
2179                 queue = &info->queues[i];
2180                 del_timer_sync(&queue->rx_refill_timer);
2181         }
2182
2183         if (num_queues) {
2184                 kfree(info->queues);
2185                 info->queues = NULL;
2186         }
2187
2188         free_percpu(info->stats);
2189
2190         free_netdev(info->netdev);
2191
2192         return 0;
2193 }
2194
2195 static const struct xenbus_device_id netfront_ids[] = {
2196         { "vif" },
2197         { "" }
2198 };
2199
2200 static struct xenbus_driver netfront_driver = {
2201         .ids = netfront_ids,
2202         .probe = netfront_probe,
2203         .remove = xennet_remove,
2204         .resume = netfront_resume,
2205         .otherend_changed = netback_changed,
2206 };
2207
2208 static int __init netif_init(void)
2209 {
2210         if (!xen_domain())
2211                 return -ENODEV;
2212
2213         if (!xen_has_pv_nic_devices())
2214                 return -ENODEV;
2215
2216         pr_info("Initialising Xen virtual ethernet driver\n");
2217
2218         /* Allow as many queues as there are CPUs if the user has not
              * specified a value; do not clobber a max_queues module parameter.
              */
2219         if (xennet_max_queues == 0)
                      xennet_max_queues = num_online_cpus();
2220
2221         return xenbus_register_frontend(&netfront_driver);
2222 }
2223 module_init(netif_init);
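
/*
 * Usage sketch (assuming the parameter is exposed as "max_queues" via
 * module_param_named()):
 *
 *     # modprobe xen-netfront max_queues=4
 *
 * caps the number of queue pairs per vif; left at 0, netif_init() above
 * defaults it to one queue per online CPU.
 */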
2224
2226 static void __exit netif_exit(void)
2227 {
2228         xenbus_unregister_driver(&netfront_driver);
2229 }
2230 module_exit(netif_exit);
2231
2232 MODULE_DESCRIPTION("Xen virtual network device frontend");
2233 MODULE_LICENSE("GPL");
2234 MODULE_ALIAS("xen:vif");
2235 MODULE_ALIAS("xennet");