drivers/net/xen-netfront.c
1 /*
2  * Virtual network driver for conversing with remote driver backends.
3  *
4  * Copyright (c) 2002-2005, K A Fraser
5  * Copyright (c) 2005, XenSource Ltd
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License version 2
9  * as published by the Free Software Foundation; or, when distributed
10  * separately from the Linux kernel or incorporated into other
11  * software packages, subject to the following license:
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a copy
14  * of this source file (the "Software"), to deal in the Software without
15  * restriction, including without limitation the rights to use, copy, modify,
16  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17  * and to permit persons to whom the Software is furnished to do so, subject to
18  * the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29  * IN THE SOFTWARE.
30  */
31
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/ethtool.h>
40 #include <linux/if_ether.h>
41 #include <net/tcp.h>
42 #include <linux/udp.h>
43 #include <linux/moduleparam.h>
44 #include <linux/mm.h>
45 #include <linux/slab.h>
46 #include <net/ip.h>
47
48 #include <asm/xen/page.h>
49 #include <xen/xen.h>
50 #include <xen/xenbus.h>
51 #include <xen/events.h>
52 #include <xen/page.h>
53 #include <xen/platform_pci.h>
54 #include <xen/grant_table.h>
55
56 #include <xen/interface/io/netif.h>
57 #include <xen/interface/memory.h>
58 #include <xen/interface/grant_table.h>
59
60 /* Module parameters */
61 static unsigned int xennet_max_queues;
62 module_param_named(max_queues, xennet_max_queues, uint, 0644);
63 MODULE_PARM_DESC(max_queues,
64                  "Maximum number of queues per virtual interface");
65
66 static const struct ethtool_ops xennet_ethtool_ops;
67
68 struct netfront_cb {
69         int pull_to;
70 };
71
72 #define NETFRONT_SKB_CB(skb)    ((struct netfront_cb *)((skb)->cb))
73
74 #define RX_COPY_THRESHOLD 256
75
76 #define GRANT_INVALID_REF       0
77
78 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
79 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
80 #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
81
82 /* Queue name is interface name with "-qNNN" appended */
83 #define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
84
85 /* IRQ name is queue name with "-tx" or "-rx" appended */
86 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
87
88 struct netfront_stats {
89         u64                     rx_packets;
90         u64                     tx_packets;
91         u64                     rx_bytes;
92         u64                     tx_bytes;
93         struct u64_stats_sync   syncp;
94 };
95
96 struct netfront_info;
97
98 struct netfront_queue {
99         unsigned int id; /* Queue ID, 0-based */
100         char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
101         struct netfront_info *info;
102
103         struct napi_struct napi;
104
105         /* Split event channels support, tx_* == rx_* when using
106          * single event channel.
107          */
108         unsigned int tx_evtchn, rx_evtchn;
109         unsigned int tx_irq, rx_irq;
110         /* Only used when split event channels support is enabled */
111         char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
112         char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
113
114         spinlock_t   tx_lock;
115         struct xen_netif_tx_front_ring tx;
116         int tx_ring_ref;
117
118         /*
119          * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
120          * are linked from tx_skb_freelist through skb_entry.link.
121          *
122          *  NB. Freelist index entries are always going to be less than
123  *  PAGE_OFFSET, whereas pointers to skbs will always be equal to or
124          *  greater than PAGE_OFFSET: we use this property to distinguish
125          *  them.
126          */
127         union skb_entry {
128                 struct sk_buff *skb;
129                 unsigned long link;
130         } tx_skbs[NET_TX_RING_SIZE];
131         grant_ref_t gref_tx_head;
132         grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
133         struct page *grant_tx_page[NET_TX_RING_SIZE];
134         unsigned tx_skb_freelist;
135
136         spinlock_t   rx_lock ____cacheline_aligned_in_smp;
137         struct xen_netif_rx_front_ring rx;
138         int rx_ring_ref;
139
140         /* Receive-ring batched refills. */
141 #define RX_MIN_TARGET 8
142 #define RX_DFL_MIN_TARGET 64
143 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
144         unsigned rx_min_target, rx_max_target, rx_target;
145         struct sk_buff_head rx_batch;
146
147         struct timer_list rx_refill_timer;
148
149         struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
150         grant_ref_t gref_rx_head;
151         grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
152
153         unsigned long rx_pfn_array[NET_RX_RING_SIZE];
154         struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
155         struct mmu_update rx_mmu[NET_RX_RING_SIZE];
156 };
157
158 struct netfront_info {
159         struct list_head list;
160         struct net_device *netdev;
161
162         struct xenbus_device *xbdev;
163
164         /* Multi-queue support */
165         struct netfront_queue *queues;
166
167         /* Statistics */
168         struct netfront_stats __percpu *stats;
169
170         atomic_t rx_gso_checksum_fixup;
171 };
172
173 struct netfront_rx_info {
174         struct xen_netif_rx_response rx;
175         struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
176 };
177
178 static void skb_entry_set_link(union skb_entry *list, unsigned short id)
179 {
180         list->link = id;
181 }
182
183 static int skb_entry_is_link(const union skb_entry *list)
184 {
185         BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
186         return (unsigned long)list->skb < PAGE_OFFSET;
187 }
188
189 /*
190  * Access helpers for acquiring and freeing slots in tx_skbs[].
191  */
192
193 static void add_id_to_freelist(unsigned *head, union skb_entry *list,
194                                unsigned short id)
195 {
196         skb_entry_set_link(&list[id], *head);
197         *head = id;
198 }
199
200 static unsigned short get_id_from_freelist(unsigned *head,
201                                            union skb_entry *list)
202 {
203         unsigned int id = *head;
204         *head = list[id].link;
205         return id;
206 }
207
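/* Map a ring index onto its slot in the rx_skbs[] and grant_rx_ref[] arrays. */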
208 static int xennet_rxidx(RING_IDX idx)
209 {
210         return idx & (NET_RX_RING_SIZE - 1);
211 }
212
213 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
214                                          RING_IDX ri)
215 {
216         int i = xennet_rxidx(ri);
217         struct sk_buff *skb = queue->rx_skbs[i];
218         queue->rx_skbs[i] = NULL;
219         return skb;
220 }
221
222 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
223                                             RING_IDX ri)
224 {
225         int i = xennet_rxidx(ri);
226         grant_ref_t ref = queue->grant_rx_ref[i];
227         queue->grant_rx_ref[i] = GRANT_INVALID_REF;
228         return ref;
229 }
230
231 #ifdef CONFIG_SYSFS
232 static int xennet_sysfs_addif(struct net_device *netdev);
233 static void xennet_sysfs_delif(struct net_device *netdev);
234 #else /* !CONFIG_SYSFS */
235 #define xennet_sysfs_addif(dev) (0)
236 #define xennet_sysfs_delif(dev) do { } while (0)
237 #endif
238
239 static bool xennet_can_sg(struct net_device *dev)
240 {
241         return dev->features & NETIF_F_SG;
242 }
243
244
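/* RX refill retry timer: a previous buffer allocation failed, so poke NAPI
 * to attempt the refill again.
 */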
245 static void rx_refill_timeout(unsigned long data)
246 {
247         struct netfront_queue *queue = (struct netfront_queue *)data;
248         napi_schedule(&queue->napi);
249 }
250
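/* True when enough TX ring slots are free to queue another maximally
 * fragmented skb.
 */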
251 static int netfront_tx_slot_available(struct netfront_queue *queue)
252 {
253         return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
254                 (TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
255 }
256
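/* Restart a stopped TX queue once ring space becomes available again. */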
257 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
258 {
259         struct net_device *dev = queue->info->netdev;
260         struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
261
262         if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
263             netfront_tx_slot_available(queue) &&
264             likely(netif_running(dev)))
265                 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
266 }
267
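/*
 * Refill the RX ring: allocate skbs backed by a single page fragment each,
 * grant the pages to the backend and push the new requests, notifying the
 * backend if required.
 */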
268 static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
269 {
270         unsigned short id;
271         struct sk_buff *skb;
272         struct page *page;
273         int i, batch_target, notify;
274         RING_IDX req_prod = queue->rx.req_prod_pvt;
275         grant_ref_t ref;
276         unsigned long pfn;
277         void *vaddr;
278         struct xen_netif_rx_request *req;
279
280         if (unlikely(!netif_carrier_ok(queue->info->netdev)))
281                 return;
282
283         /*
284          * Allocate skbuffs greedily, even though we batch updates to the
285          * receive ring. This creates a less bursty demand on the memory
286          * allocator, so should reduce the chance of failed allocation requests
287  * both for ourselves and for other kernel subsystems.
288          */
289         batch_target = queue->rx_target - (req_prod - queue->rx.rsp_cons);
290         for (i = skb_queue_len(&queue->rx_batch); i < batch_target; i++) {
291                 skb = __netdev_alloc_skb(queue->info->netdev,
292                                          RX_COPY_THRESHOLD + NET_IP_ALIGN,
293                                          GFP_ATOMIC | __GFP_NOWARN);
294                 if (unlikely(!skb))
295                         goto no_skb;
296
297                 /* Align IP header to a 16-byte boundary */
298                 skb_reserve(skb, NET_IP_ALIGN);
299
300                 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
301                 if (!page) {
302                         kfree_skb(skb);
303 no_skb:
304                         /* Could not allocate any skbuffs. Try again later. */
305                         mod_timer(&queue->rx_refill_timer,
306                                   jiffies + (HZ/10));
307
308                         /* Any skbuffs queued for refill? Force them out. */
309                         if (i != 0)
310                                 goto refill;
311                         break;
312                 }
313
314                 skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
315                 __skb_queue_tail(&queue->rx_batch, skb);
316         }
317
318         /* Is the batch large enough to be worthwhile? */
319         if (i < (queue->rx_target/2)) {
320                 if (req_prod > queue->rx.sring->req_prod)
321                         goto push;
322                 return;
323         }
324
325         /* Adjust our fill target if we risked running out of buffers. */
326         if (((req_prod - queue->rx.sring->rsp_prod) < (queue->rx_target / 4)) &&
327             ((queue->rx_target *= 2) > queue->rx_max_target))
328                 queue->rx_target = queue->rx_max_target;
329
330  refill:
331         for (i = 0; ; i++) {
332                 skb = __skb_dequeue(&queue->rx_batch);
333                 if (skb == NULL)
334                         break;
335
336                 skb->dev = queue->info->netdev;
337
338                 id = xennet_rxidx(req_prod + i);
339
340                 BUG_ON(queue->rx_skbs[id]);
341                 queue->rx_skbs[id] = skb;
342
343                 ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
344                 BUG_ON((signed short)ref < 0);
345                 queue->grant_rx_ref[id] = ref;
346
347                 pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
348                 vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
349
350                 req = RING_GET_REQUEST(&queue->rx, req_prod + i);
351                 gnttab_grant_foreign_access_ref(ref,
352                                                 queue->info->xbdev->otherend_id,
353                                                 pfn_to_mfn(pfn),
354                                                 0);
355
356                 req->id = id;
357                 req->gref = ref;
358         }
359
360         wmb();          /* barrier so backend sees requests */
361
362         /* Above is a suitable barrier to ensure backend will see requests. */
363         queue->rx.req_prod_pvt = req_prod + i;
364  push:
365         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
366         if (notify)
367                 notify_remote_via_irq(queue->rx_irq);
368 }
369
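/* ndo_open: enable NAPI on every queue, prime the RX rings and start the
 * TX queues.
 */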
370 static int xennet_open(struct net_device *dev)
371 {
372         struct netfront_info *np = netdev_priv(dev);
373         unsigned int num_queues = dev->real_num_tx_queues;
374         unsigned int i = 0;
375         struct netfront_queue *queue = NULL;
376
377         for (i = 0; i < num_queues; ++i) {
378                 queue = &np->queues[i];
379                 napi_enable(&queue->napi);
380
381                 spin_lock_bh(&queue->rx_lock);
382                 if (netif_carrier_ok(dev)) {
383                         xennet_alloc_rx_buffers(queue);
384                         queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
385                         if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
386                                 napi_schedule(&queue->napi);
387                 }
388                 spin_unlock_bh(&queue->rx_lock);
389         }
390
391         netif_tx_start_all_queues(dev);
392
393         return 0;
394 }
395
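/*
 * Reclaim TX ring slots that the backend has responded to: end grant access,
 * return the ids to the freelist, free the skbs and rearm the response event
 * index so further completions generate an interrupt.
 */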
396 static void xennet_tx_buf_gc(struct netfront_queue *queue)
397 {
398         RING_IDX cons, prod;
399         unsigned short id;
400         struct sk_buff *skb;
401
402         BUG_ON(!netif_carrier_ok(queue->info->netdev));
403
404         do {
405                 prod = queue->tx.sring->rsp_prod;
406                 rmb(); /* Ensure we see responses up to 'rp'. */
407
408                 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
409                         struct xen_netif_tx_response *txrsp;
410
411                         txrsp = RING_GET_RESPONSE(&queue->tx, cons);
412                         if (txrsp->status == XEN_NETIF_RSP_NULL)
413                                 continue;
414
415                         id  = txrsp->id;
416                         skb = queue->tx_skbs[id].skb;
417                         if (unlikely(gnttab_query_foreign_access(
418                                 queue->grant_tx_ref[id]) != 0)) {
419                                 pr_alert("%s: warning -- grant still in use by backend domain\n",
420                                          __func__);
421                                 BUG();
422                         }
423                         gnttab_end_foreign_access_ref(
424                                 queue->grant_tx_ref[id], GNTMAP_readonly);
425                         gnttab_release_grant_reference(
426                                 &queue->gref_tx_head, queue->grant_tx_ref[id]);
427                         queue->grant_tx_ref[id] = GRANT_INVALID_REF;
428                         queue->grant_tx_page[id] = NULL;
429                         add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
430                         dev_kfree_skb_irq(skb);
431                 }
432
433                 queue->tx.rsp_cons = prod;
434
435                 /*
436                  * Set a new event, then check for race with update of tx_cons.
437                  * Note that it is essential to schedule a callback, no matter
438                  * how few buffers are pending. Even if there is space in the
439                  * transmit ring, higher layers may be blocked because too much
440                  * data is outstanding: in such cases notification from Xen is
441                  * likely to be the only kick that we'll get.
442                  */
443                 queue->tx.sring->rsp_event =
444                         prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
445                 mb();           /* update shared area */
446         } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));
447
448         xennet_maybe_wake_tx(queue);
449 }
450
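/*
 * Emit additional TX requests for the remainder of the linear header and for
 * each fragment, granting the backend read-only access one page at a time.
 */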
451 static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
452                               struct xen_netif_tx_request *tx)
453 {
454         char *data = skb->data;
455         unsigned long mfn;
456         RING_IDX prod = queue->tx.req_prod_pvt;
457         int frags = skb_shinfo(skb)->nr_frags;
458         unsigned int offset = offset_in_page(data);
459         unsigned int len = skb_headlen(skb);
460         unsigned int id;
461         grant_ref_t ref;
462         int i;
463
464         /* While the header overlaps a page boundary (including being
465            larger than a page), split it into page-sized chunks. */
466         while (len > PAGE_SIZE - offset) {
467                 tx->size = PAGE_SIZE - offset;
468                 tx->flags |= XEN_NETTXF_more_data;
469                 len -= tx->size;
470                 data += tx->size;
471                 offset = 0;
472
473                 id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
474                 queue->tx_skbs[id].skb = skb_get(skb);
475                 tx = RING_GET_REQUEST(&queue->tx, prod++);
476                 tx->id = id;
477                 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
478                 BUG_ON((signed short)ref < 0);
479
480                 mfn = virt_to_mfn(data);
481                 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
482                                                 mfn, GNTMAP_readonly);
483
484                 queue->grant_tx_page[id] = virt_to_page(data);
485                 tx->gref = queue->grant_tx_ref[id] = ref;
486                 tx->offset = offset;
487                 tx->size = len;
488                 tx->flags = 0;
489         }
490
491         /* Grant backend access to each skb fragment page. */
492         for (i = 0; i < frags; i++) {
493                 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
494                 struct page *page = skb_frag_page(frag);
495
496                 len = skb_frag_size(frag);
497                 offset = frag->page_offset;
498
499                 /* Data must not cross a page boundary. */
500                 BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
501
502                 /* Skip unused frames from start of page */
503                 page += offset >> PAGE_SHIFT;
504                 offset &= ~PAGE_MASK;
505
506                 while (len > 0) {
507                         unsigned long bytes;
508
509                         BUG_ON(offset >= PAGE_SIZE);
510
511                         bytes = PAGE_SIZE - offset;
512                         if (bytes > len)
513                                 bytes = len;
514
515                         tx->flags |= XEN_NETTXF_more_data;
516
517                         id = get_id_from_freelist(&queue->tx_skb_freelist,
518                                                   queue->tx_skbs);
519                         queue->tx_skbs[id].skb = skb_get(skb);
520                         tx = RING_GET_REQUEST(&queue->tx, prod++);
521                         tx->id = id;
522                         ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
523                         BUG_ON((signed short)ref < 0);
524
525                         mfn = pfn_to_mfn(page_to_pfn(page));
526                         gnttab_grant_foreign_access_ref(ref,
527                                                         queue->info->xbdev->otherend_id,
528                                                         mfn, GNTMAP_readonly);
529
530                         queue->grant_tx_page[id] = page;
531                         tx->gref = queue->grant_tx_ref[id] = ref;
532                         tx->offset = offset;
533                         tx->size = bytes;
534                         tx->flags = 0;
535
536                         offset += bytes;
537                         len -= bytes;
538
539                         /* Next frame */
540                         if (offset == PAGE_SIZE && len) {
541                                 BUG_ON(!PageCompound(page));
542                                 page++;
543                                 offset = 0;
544                         }
545                 }
546         }
547
548         queue->tx.req_prod_pvt = prod;
549 }
550
551 /*
552  * Count how many ring slots are required to send the frags of this
553  * skb. Each frag might be a compound page.
554  */
555 static int xennet_count_skb_frag_slots(struct sk_buff *skb)
556 {
557         int i, frags = skb_shinfo(skb)->nr_frags;
558         int pages = 0;
559
560         for (i = 0; i < frags; i++) {
561                 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
562                 unsigned long size = skb_frag_size(frag);
563                 unsigned long offset = frag->page_offset;
564
565                 /* Skip unused frames from start of page */
566                 offset &= ~PAGE_MASK;
567
568                 pages += PFN_UP(offset + size);
569         }
570
571         return pages;
572 }
573
574 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
575                                void *accel_priv, select_queue_fallback_t fallback)
576 {
577         unsigned int num_queues = dev->real_num_tx_queues;
578         u32 hash;
579         u16 queue_idx;
580
581         /* First, check if there is only one queue */
582         if (num_queues == 1) {
583                 queue_idx = 0;
584         } else {
585                 hash = skb_get_hash(skb);
586                 queue_idx = hash % num_queues;
587         }
588
589         return queue_idx;
590 }
591
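/* ndo_start_xmit: map the skb onto TX ring requests (including checksum and
 * GSO extra info), kick the backend, update stats and reclaim any slots the
 * backend has already completed.
 */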
592 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
593 {
594         unsigned short id;
595         struct netfront_info *np = netdev_priv(dev);
596         struct netfront_stats *stats = this_cpu_ptr(np->stats);
597         struct xen_netif_tx_request *tx;
598         char *data = skb->data;
599         RING_IDX i;
600         grant_ref_t ref;
601         unsigned long mfn;
602         int notify;
603         int slots;
604         unsigned int offset = offset_in_page(data);
605         unsigned int len = skb_headlen(skb);
606         unsigned long flags;
607         struct netfront_queue *queue = NULL;
608         unsigned int num_queues = dev->real_num_tx_queues;
609         u16 queue_index;
610
611         /* Drop the packet if no queues are set up */
612         if (num_queues < 1)
613                 goto drop;
614         /* Determine which queue to transmit this SKB on */
615         queue_index = skb_get_queue_mapping(skb);
616         queue = &np->queues[queue_index];
617
618         /* If skb->len is too big for wire format, drop skb and alert
619          * user about misconfiguration.
620          */
621         if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
622                 net_alert_ratelimited(
623                         "xennet: skb->len = %u, too big for wire format\n",
624                         skb->len);
625                 goto drop;
626         }
627
628         slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
629                 xennet_count_skb_frag_slots(skb);
630         if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
631                 net_alert_ratelimited(
632                         "xennet: skb rides the rocket: %d slots\n", slots);
633                 goto drop;
634         }
635
636         spin_lock_irqsave(&queue->tx_lock, flags);
637
638         if (unlikely(!netif_carrier_ok(dev) ||
639                      (slots > 1 && !xennet_can_sg(dev)) ||
640                      netif_needs_gso(skb, netif_skb_features(skb)))) {
641                 spin_unlock_irqrestore(&queue->tx_lock, flags);
642                 goto drop;
643         }
644
645         i = queue->tx.req_prod_pvt;
646
647         id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
648         queue->tx_skbs[id].skb = skb;
649
650         tx = RING_GET_REQUEST(&queue->tx, i);
651
652         tx->id   = id;
653         ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
654         BUG_ON((signed short)ref < 0);
655         mfn = virt_to_mfn(data);
656         gnttab_grant_foreign_access_ref(
657                 ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly);
658         queue->grant_tx_page[id] = virt_to_page(data);
659         tx->gref = queue->grant_tx_ref[id] = ref;
660         tx->offset = offset;
661         tx->size = len;
662
663         tx->flags = 0;
664         if (skb->ip_summed == CHECKSUM_PARTIAL)
665                 /* local packet? */
666                 tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
667         else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
668                 /* remote but checksummed. */
669                 tx->flags |= XEN_NETTXF_data_validated;
670
671         if (skb_shinfo(skb)->gso_size) {
672                 struct xen_netif_extra_info *gso;
673
674                 gso = (struct xen_netif_extra_info *)
675                         RING_GET_REQUEST(&queue->tx, ++i);
676
677                 tx->flags |= XEN_NETTXF_extra_info;
678
679                 gso->u.gso.size = skb_shinfo(skb)->gso_size;
680                 gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
681                         XEN_NETIF_GSO_TYPE_TCPV6 :
682                         XEN_NETIF_GSO_TYPE_TCPV4;
683                 gso->u.gso.pad = 0;
684                 gso->u.gso.features = 0;
685
686                 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
687                 gso->flags = 0;
688         }
689
690         queue->tx.req_prod_pvt = i + 1;
691
692         xennet_make_frags(skb, queue, tx);
693         tx->size = skb->len;
694
695         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
696         if (notify)
697                 notify_remote_via_irq(queue->tx_irq);
698
699         u64_stats_update_begin(&stats->syncp);
700         stats->tx_bytes += skb->len;
701         stats->tx_packets++;
702         u64_stats_update_end(&stats->syncp);
703
704         /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
705         xennet_tx_buf_gc(queue);
706
707         if (!netfront_tx_slot_available(queue))
708                 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
709
710         spin_unlock_irqrestore(&queue->tx_lock, flags);
711
712         return NETDEV_TX_OK;
713
714  drop:
715         dev->stats.tx_dropped++;
716         dev_kfree_skb_any(skb);
717         return NETDEV_TX_OK;
718 }
719
720 static int xennet_close(struct net_device *dev)
721 {
722         struct netfront_info *np = netdev_priv(dev);
723         unsigned int num_queues = dev->real_num_tx_queues;
724         unsigned int i;
725         struct netfront_queue *queue;
726         netif_tx_stop_all_queues(np->netdev);
727         for (i = 0; i < num_queues; ++i) {
728                 queue = &np->queues[i];
729                 napi_disable(&queue->napi);
730         }
731         return 0;
732 }
733
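/* Re-post an unconsumed RX buffer (skb plus grant ref) at the next free
 * request slot.
 */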
734 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
735                                 grant_ref_t ref)
736 {
737         int new = xennet_rxidx(queue->rx.req_prod_pvt);
738
739         BUG_ON(queue->rx_skbs[new]);
740         queue->rx_skbs[new] = skb;
741         queue->grant_rx_ref[new] = ref;
742         RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
743         RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
744         queue->rx.req_prod_pvt++;
745 }
746
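/*
 * Consume the chain of extra-info slots following an RX response, copying
 * each recognised entry into extras[] and recycling the buffers the backend
 * used for them.
 */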
747 static int xennet_get_extras(struct netfront_queue *queue,
748                              struct xen_netif_extra_info *extras,
749                              RING_IDX rp)
750
751 {
752         struct xen_netif_extra_info *extra;
753         struct device *dev = &queue->info->netdev->dev;
754         RING_IDX cons = queue->rx.rsp_cons;
755         int err = 0;
756
757         do {
758                 struct sk_buff *skb;
759                 grant_ref_t ref;
760
761                 if (unlikely(cons + 1 == rp)) {
762                         if (net_ratelimit())
763                                 dev_warn(dev, "Missing extra info\n");
764                         err = -EBADR;
765                         break;
766                 }
767
768                 extra = (struct xen_netif_extra_info *)
769                         RING_GET_RESPONSE(&queue->rx, ++cons);
770
771                 if (unlikely(!extra->type ||
772                              extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
773                         if (net_ratelimit())
774                                 dev_warn(dev, "Invalid extra type: %d\n",
775                                         extra->type);
776                         err = -EINVAL;
777                 } else {
778                         memcpy(&extras[extra->type - 1], extra,
779                                sizeof(*extra));
780                 }
781
782                 skb = xennet_get_rx_skb(queue, cons);
783                 ref = xennet_get_rx_ref(queue, cons);
784                 xennet_move_rx_slot(queue, skb, ref);
785         } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
786
787         queue->rx.rsp_cons = cons;
788         return err;
789 }
790
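/*
 * Gather every response slot belonging to one packet onto 'list', ending
 * grant access for each buffer. On error the slots are still consumed so
 * that the ring stays consistent.
 */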
791 static int xennet_get_responses(struct netfront_queue *queue,
792                                 struct netfront_rx_info *rinfo, RING_IDX rp,
793                                 struct sk_buff_head *list)
794 {
795         struct xen_netif_rx_response *rx = &rinfo->rx;
796         struct xen_netif_extra_info *extras = rinfo->extras;
797         struct device *dev = &queue->info->netdev->dev;
798         RING_IDX cons = queue->rx.rsp_cons;
799         struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
800         grant_ref_t ref = xennet_get_rx_ref(queue, cons);
801         int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
802         int slots = 1;
803         int err = 0;
804         unsigned long ret;
805
806         if (rx->flags & XEN_NETRXF_extra_info) {
807                 err = xennet_get_extras(queue, extras, rp);
808                 cons = queue->rx.rsp_cons;
809         }
810
811         for (;;) {
812                 if (unlikely(rx->status < 0 ||
813                              rx->offset + rx->status > PAGE_SIZE)) {
814                         if (net_ratelimit())
815                                 dev_warn(dev, "rx->offset: %x, size: %u\n",
816                                          rx->offset, rx->status);
817                         xennet_move_rx_slot(queue, skb, ref);
818                         err = -EINVAL;
819                         goto next;
820                 }
821
822                 /*
823                  * This definitely indicates a bug, either in this driver or in
824                  * the backend driver. In future this should flag the bad
825                  * situation to the system controller to reboot the backend.
826                  */
827                 if (ref == GRANT_INVALID_REF) {
828                         if (net_ratelimit())
829                                 dev_warn(dev, "Bad rx response id %d.\n",
830                                          rx->id);
831                         err = -EINVAL;
832                         goto next;
833                 }
834
835                 ret = gnttab_end_foreign_access_ref(ref, 0);
836                 BUG_ON(!ret);
837
838                 gnttab_release_grant_reference(&queue->gref_rx_head, ref);
839
840                 __skb_queue_tail(list, skb);
841
842 next:
843                 if (!(rx->flags & XEN_NETRXF_more_data))
844                         break;
845
846                 if (cons + slots == rp) {
847                         if (net_ratelimit())
848                                 dev_warn(dev, "Need more slots\n");
849                         err = -ENOENT;
850                         break;
851                 }
852
853                 rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
854                 skb = xennet_get_rx_skb(queue, cons + slots);
855                 ref = xennet_get_rx_ref(queue, cons + slots);
856                 slots++;
857         }
858
859         if (unlikely(slots > max)) {
860                 if (net_ratelimit())
861                         dev_warn(dev, "Too many slots\n");
862                 err = -E2BIG;
863         }
864
865         if (unlikely(err))
866                 queue->rx.rsp_cons = cons + slots;
867
868         return err;
869 }
870
871 static int xennet_set_skb_gso(struct sk_buff *skb,
872                               struct xen_netif_extra_info *gso)
873 {
874         if (!gso->u.gso.size) {
875                 if (net_ratelimit())
876                         pr_warn("GSO size must not be zero\n");
877                 return -EINVAL;
878         }
879
880         if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
881             gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
882                 if (net_ratelimit())
883                         pr_warn("Bad GSO type %d\n", gso->u.gso.type);
884                 return -EINVAL;
885         }
886
887         skb_shinfo(skb)->gso_size = gso->u.gso.size;
888         skb_shinfo(skb)->gso_type =
889                 (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
890                 SKB_GSO_TCPV4 :
891                 SKB_GSO_TCPV6;
892
893         /* Header must be checked, and gso_segs computed. */
894         skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
895         skb_shinfo(skb)->gso_segs = 0;
896
897         return 0;
898 }
899
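/*
 * Attach the extra per-slot buffers gathered on 'list' to the head skb as
 * page fragments, pulling data into the linear area when the fragment array
 * is full.
 */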
900 static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
901                                   struct sk_buff *skb,
902                                   struct sk_buff_head *list)
903 {
904         struct skb_shared_info *shinfo = skb_shinfo(skb);
905         RING_IDX cons = queue->rx.rsp_cons;
906         struct sk_buff *nskb;
907
908         while ((nskb = __skb_dequeue(list))) {
909                 struct xen_netif_rx_response *rx =
910                         RING_GET_RESPONSE(&queue->rx, ++cons);
911                 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
912
913                 if (shinfo->nr_frags == MAX_SKB_FRAGS) {
914                         unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
915
916                         BUG_ON(pull_to <= skb_headlen(skb));
917                         __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
918                 }
919                 BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
920
921                 skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
922                                 rx->offset, rx->status, PAGE_SIZE);
923
924                 skb_shinfo(nskb)->nr_frags = 0;
925                 kfree_skb(nskb);
926         }
927
928         return cons;
929 }
930
931 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
932 {
933         bool recalculate_partial_csum = false;
934
935         /*
936          * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
937          * peers can fail to set NETRXF_csum_blank when sending a GSO
938          * frame. In this case force the SKB to CHECKSUM_PARTIAL and
939          * recalculate the partial checksum.
940          */
941         if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
942                 struct netfront_info *np = netdev_priv(dev);
943                 atomic_inc(&np->rx_gso_checksum_fixup);
944                 skb->ip_summed = CHECKSUM_PARTIAL;
945                 recalculate_partial_csum = true;
946         }
947
948         /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
949         if (skb->ip_summed != CHECKSUM_PARTIAL)
950                 return 0;
951
952         return skb_checksum_setup(skb, recalculate_partial_csum);
953 }
954
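/*
 * Final per-packet work before handing skbs to the stack: pull headers into
 * the linear area, set the protocol, fix up checksum state, update stats and
 * pass each skb to GRO. Returns the number of packets dropped.
 */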
955 static int handle_incoming_queue(struct netfront_queue *queue,
956                                  struct sk_buff_head *rxq)
957 {
958         struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
959         int packets_dropped = 0;
960         struct sk_buff *skb;
961
962         while ((skb = __skb_dequeue(rxq)) != NULL) {
963                 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
964
965                 if (pull_to > skb_headlen(skb))
966                         __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
967
968                 /* Ethernet work: Delayed to here as it peeks the header. */
969                 skb->protocol = eth_type_trans(skb, queue->info->netdev);
970                 skb_reset_network_header(skb);
971
972                 if (checksum_setup(queue->info->netdev, skb)) {
973                         kfree_skb(skb);
974                         packets_dropped++;
975                         queue->info->netdev->stats.rx_errors++;
976                         continue;
977                 }
978
979                 u64_stats_update_begin(&stats->syncp);
980                 stats->rx_packets++;
981                 stats->rx_bytes += skb->len;
982                 u64_stats_update_end(&stats->syncp);
983
984                 /* Pass it up. */
985                 napi_gro_receive(&queue->napi, skb);
986         }
987
988         return packets_dropped;
989 }
990
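/*
 * NAPI poll handler: consume RX responses up to 'budget', adjust the fill
 * target, refill the ring and complete NAPI when no more work is pending.
 */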
991 static int xennet_poll(struct napi_struct *napi, int budget)
992 {
993         struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
994         struct net_device *dev = queue->info->netdev;
995         struct sk_buff *skb;
996         struct netfront_rx_info rinfo;
997         struct xen_netif_rx_response *rx = &rinfo.rx;
998         struct xen_netif_extra_info *extras = rinfo.extras;
999         RING_IDX i, rp;
1000         int work_done;
1001         struct sk_buff_head rxq;
1002         struct sk_buff_head errq;
1003         struct sk_buff_head tmpq;
1004         unsigned long flags;
1005         int err;
1006
1007         spin_lock(&queue->rx_lock);
1008
1009         skb_queue_head_init(&rxq);
1010         skb_queue_head_init(&errq);
1011         skb_queue_head_init(&tmpq);
1012
1013         rp = queue->rx.sring->rsp_prod;
1014         rmb(); /* Ensure we see queued responses up to 'rp'. */
1015
1016         i = queue->rx.rsp_cons;
1017         work_done = 0;
1018         while ((i != rp) && (work_done < budget)) {
1019                 memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
1020                 memset(extras, 0, sizeof(rinfo.extras));
1021
1022                 err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
1023
1024                 if (unlikely(err)) {
1025 err:
1026                         while ((skb = __skb_dequeue(&tmpq)))
1027                                 __skb_queue_tail(&errq, skb);
1028                         dev->stats.rx_errors++;
1029                         i = queue->rx.rsp_cons;
1030                         continue;
1031                 }
1032
1033                 skb = __skb_dequeue(&tmpq);
1034
1035                 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1036                         struct xen_netif_extra_info *gso;
1037                         gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1038
1039                         if (unlikely(xennet_set_skb_gso(skb, gso))) {
1040                                 __skb_queue_head(&tmpq, skb);
1041                                 queue->rx.rsp_cons += skb_queue_len(&tmpq);
1042                                 goto err;
1043                         }
1044                 }
1045
1046                 NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1047                 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1048                         NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1049
1050                 skb_shinfo(skb)->frags[0].page_offset = rx->offset;
1051                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1052                 skb->data_len = rx->status;
1053                 skb->len += rx->status;
1054
1055                 i = xennet_fill_frags(queue, skb, &tmpq);
1056
1057                 if (rx->flags & XEN_NETRXF_csum_blank)
1058                         skb->ip_summed = CHECKSUM_PARTIAL;
1059                 else if (rx->flags & XEN_NETRXF_data_validated)
1060                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1061
1062                 __skb_queue_tail(&rxq, skb);
1063
1064                 queue->rx.rsp_cons = ++i;
1065                 work_done++;
1066         }
1067
1068         __skb_queue_purge(&errq);
1069
1070         work_done -= handle_incoming_queue(queue, &rxq);
1071
1072         /* If we get a callback with very few responses, reduce fill target. */
1073         /* NB. Note exponential increase, linear decrease. */
1074         if (((queue->rx.req_prod_pvt - queue->rx.sring->rsp_prod) >
1075              ((3*queue->rx_target) / 4)) &&
1076             (--queue->rx_target < queue->rx_min_target))
1077                 queue->rx_target = queue->rx_min_target;
1078
1079         xennet_alloc_rx_buffers(queue);
1080
1081         if (work_done < budget) {
1082                 int more_to_do = 0;
1083
1084                 napi_gro_flush(napi, false);
1085
1086                 local_irq_save(flags);
1087
1088                 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1089                 if (!more_to_do)
1090                         __napi_complete(napi);
1091
1092                 local_irq_restore(flags);
1093         }
1094
1095         spin_unlock(&queue->rx_lock);
1096
1097         return work_done;
1098 }
1099
1100 static int xennet_change_mtu(struct net_device *dev, int mtu)
1101 {
1102         int max = xennet_can_sg(dev) ?
1103                 XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
1104
1105         if (mtu > max)
1106                 return -EINVAL;
1107         dev->mtu = mtu;
1108         return 0;
1109 }
1110
1111 static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1112                                                     struct rtnl_link_stats64 *tot)
1113 {
1114         struct netfront_info *np = netdev_priv(dev);
1115         int cpu;
1116
1117         for_each_possible_cpu(cpu) {
1118                 struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
1119                 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1120                 unsigned int start;
1121
1122                 do {
1123                         start = u64_stats_fetch_begin_irq(&stats->syncp);
1124
1125                         rx_packets = stats->rx_packets;
1126                         tx_packets = stats->tx_packets;
1127                         rx_bytes = stats->rx_bytes;
1128                         tx_bytes = stats->tx_bytes;
1129                 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1130
1131                 tot->rx_packets += rx_packets;
1132                 tot->tx_packets += tx_packets;
1133                 tot->rx_bytes   += rx_bytes;
1134                 tot->tx_bytes   += tx_bytes;
1135         }
1136
1137         tot->rx_errors  = dev->stats.rx_errors;
1138         tot->tx_dropped = dev->stats.tx_dropped;
1139
1140         return tot;
1141 }
1142
1143 static void xennet_release_tx_bufs(struct netfront_queue *queue)
1144 {
1145         struct sk_buff *skb;
1146         int i;
1147
1148         for (i = 0; i < NET_TX_RING_SIZE; i++) {
1149                 /* Skip over entries which are actually freelist references */
1150                 if (skb_entry_is_link(&queue->tx_skbs[i]))
1151                         continue;
1152
1153                 skb = queue->tx_skbs[i].skb;
1154                 get_page(queue->grant_tx_page[i]);
1155                 gnttab_end_foreign_access(queue->grant_tx_ref[i],
1156                                           GNTMAP_readonly,
1157                                           (unsigned long)page_address(queue->grant_tx_page[i]));
1158                 queue->grant_tx_page[i] = NULL;
1159                 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1160                 add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
1161                 dev_kfree_skb_irq(skb);
1162         }
1163 }
1164
1165 static void xennet_release_rx_bufs(struct netfront_queue *queue)
1166 {
1167         int id, ref;
1168
1169         spin_lock_bh(&queue->rx_lock);
1170
1171         for (id = 0; id < NET_RX_RING_SIZE; id++) {
1172                 struct sk_buff *skb;
1173                 struct page *page;
1174
1175                 skb = queue->rx_skbs[id];
1176                 if (!skb)
1177                         continue;
1178
1179                 ref = queue->grant_rx_ref[id];
1180                 if (ref == GRANT_INVALID_REF)
1181                         continue;
1182
1183                 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1184
1185                 /* gnttab_end_foreign_access() needs a page ref until
1186                  * foreign access is ended (which may be deferred).
1187                  */
1188                 get_page(page);
1189                 gnttab_end_foreign_access(ref, 0,
1190                                           (unsigned long)page_address(page));
1191                 queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1192
1193                 kfree_skb(skb);
1194         }
1195
1196         spin_unlock_bh(&queue->rx_lock);
1197 }
1198
1199 static void xennet_uninit(struct net_device *dev)
1200 {
1201         struct netfront_info *np = netdev_priv(dev);
1202         unsigned int num_queues = dev->real_num_tx_queues;
1203         struct netfront_queue *queue;
1204         unsigned int i;
1205
1206         for (i = 0; i < num_queues; ++i) {
1207                 queue = &np->queues[i];
1208                 xennet_release_tx_bufs(queue);
1209                 xennet_release_rx_bufs(queue);
1210                 gnttab_free_grant_references(queue->gref_tx_head);
1211                 gnttab_free_grant_references(queue->gref_rx_head);
1212         }
1213 }
1214
1215 static netdev_features_t xennet_fix_features(struct net_device *dev,
1216         netdev_features_t features)
1217 {
1218         struct netfront_info *np = netdev_priv(dev);
1219         int val;
1220
1221         if (features & NETIF_F_SG) {
1222                 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1223                                  "%d", &val) < 0)
1224                         val = 0;
1225
1226                 if (!val)
1227                         features &= ~NETIF_F_SG;
1228         }
1229
1230         if (features & NETIF_F_IPV6_CSUM) {
1231                 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1232                                  "feature-ipv6-csum-offload", "%d", &val) < 0)
1233                         val = 0;
1234
1235                 if (!val)
1236                         features &= ~NETIF_F_IPV6_CSUM;
1237         }
1238
1239         if (features & NETIF_F_TSO) {
1240                 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1241                                  "feature-gso-tcpv4", "%d", &val) < 0)
1242                         val = 0;
1243
1244                 if (!val)
1245                         features &= ~NETIF_F_TSO;
1246         }
1247
1248         if (features & NETIF_F_TSO6) {
1249                 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1250                                  "feature-gso-tcpv6", "%d", &val) < 0)
1251                         val = 0;
1252
1253                 if (!val)
1254                         features &= ~NETIF_F_TSO6;
1255         }
1256
1257         return features;
1258 }
1259
1260 static int xennet_set_features(struct net_device *dev,
1261         netdev_features_t features)
1262 {
1263         if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1264                 netdev_info(dev, "Reducing MTU because no SG offload\n");
1265                 dev->mtu = ETH_DATA_LEN;
1266         }
1267
1268         return 0;
1269 }
1270
1271 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1272 {
1273         struct netfront_queue *queue = dev_id;
1274         unsigned long flags;
1275
1276         spin_lock_irqsave(&queue->tx_lock, flags);
1277         xennet_tx_buf_gc(queue);
1278         spin_unlock_irqrestore(&queue->tx_lock, flags);
1279
1280         return IRQ_HANDLED;
1281 }
1282
1283 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1284 {
1285         struct netfront_queue *queue = dev_id;
1286         struct net_device *dev = queue->info->netdev;
1287
1288         if (likely(netif_carrier_ok(dev) &&
1289                    RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1290                         napi_schedule(&queue->napi);
1291
1292         return IRQ_HANDLED;
1293 }
1294
1295 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1296 {
1297         xennet_tx_interrupt(irq, dev_id);
1298         xennet_rx_interrupt(irq, dev_id);
1299         return IRQ_HANDLED;
1300 }
1301
1302 #ifdef CONFIG_NET_POLL_CONTROLLER
1303 static void xennet_poll_controller(struct net_device *dev)
1304 {
1305         /* Poll each queue */
1306         struct netfront_info *info = netdev_priv(dev);
1307         unsigned int num_queues = dev->real_num_tx_queues;
1308         unsigned int i;
1309         for (i = 0; i < num_queues; ++i)
1310                 xennet_interrupt(0, &info->queues[i]);
1311 }
1312 #endif
1313
1314 static const struct net_device_ops xennet_netdev_ops = {
1315         .ndo_open            = xennet_open,
1316         .ndo_uninit          = xennet_uninit,
1317         .ndo_stop            = xennet_close,
1318         .ndo_start_xmit      = xennet_start_xmit,
1319         .ndo_change_mtu      = xennet_change_mtu,
1320         .ndo_get_stats64     = xennet_get_stats64,
1321         .ndo_set_mac_address = eth_mac_addr,
1322         .ndo_validate_addr   = eth_validate_addr,
1323         .ndo_fix_features    = xennet_fix_features,
1324         .ndo_set_features    = xennet_set_features,
1325         .ndo_select_queue    = xennet_select_queue,
1326 #ifdef CONFIG_NET_POLL_CONTROLLER
1327         .ndo_poll_controller = xennet_poll_controller,
1328 #endif
1329 };
1330
1331 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1332 {
1333         int err;
1334         struct net_device *netdev;
1335         struct netfront_info *np;
1336
1337         netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1338         if (!netdev)
1339                 return ERR_PTR(-ENOMEM);
1340
1341         np                   = netdev_priv(netdev);
1342         np->xbdev            = dev;
1343
1344         /* No need to use rtnl_lock() before the call below as it
1345          * happens before register_netdev().
1346          */
1347         netif_set_real_num_tx_queues(netdev, 0);
1348         np->queues = NULL;
1349
1350         err = -ENOMEM;
1351         np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1352         if (np->stats == NULL)
1353                 goto exit;
1354
1355         netdev->netdev_ops      = &xennet_netdev_ops;
1356
1357         netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1358                                   NETIF_F_GSO_ROBUST;
1359         netdev->hw_features     = NETIF_F_SG |
1360                                   NETIF_F_IPV6_CSUM |
1361                                   NETIF_F_TSO | NETIF_F_TSO6;
1362
1363         /*
1364          * Assume that all hw features are available for now. This set
1365          * will be adjusted by the call to netdev_update_features() in
1366          * xennet_connect() which is the earliest point where we can
1367          * negotiate with the backend regarding supported features.
1368          */
1369         netdev->features |= netdev->hw_features;
1370
1371         netdev->ethtool_ops = &xennet_ethtool_ops;
1372         SET_NETDEV_DEV(netdev, &dev->dev);
1373
1374         netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
1375
1376         np->netdev = netdev;
1377
1378         netif_carrier_off(netdev);
1379
1380         return netdev;
1381
1382  exit:
1383         free_netdev(netdev);
1384         return ERR_PTR(err);
1385 }
1386
1387 /**
1388  * Entry point to this code when a new device is created.  Allocate the basic
1389  * structures and the ring buffers for communication with the backend, and
1390  * inform the backend of the appropriate details for those.
1391  */
1392 static int netfront_probe(struct xenbus_device *dev,
1393                           const struct xenbus_device_id *id)
1394 {
1395         int err;
1396         struct net_device *netdev;
1397         struct netfront_info *info;
1398
1399         netdev = xennet_create_dev(dev);
1400         if (IS_ERR(netdev)) {
1401                 err = PTR_ERR(netdev);
1402                 xenbus_dev_fatal(dev, err, "creating netdev");
1403                 return err;
1404         }
1405
1406         info = netdev_priv(netdev);
1407         dev_set_drvdata(&dev->dev, info);
1408
1409         err = register_netdev(info->netdev);
1410         if (err) {
1411                 pr_warn("%s: register_netdev err=%d\n", __func__, err);
1412                 goto fail;
1413         }
1414
1415         err = xennet_sysfs_addif(info->netdev);
1416         if (err) {
1417                 unregister_netdev(info->netdev);
1418                 pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
1419                 goto fail;
1420         }
1421
1422         return 0;
1423
1424  fail:
1425         free_netdev(netdev);
1426         dev_set_drvdata(&dev->dev, NULL);
1427         return err;
1428 }
1429
1430 static void xennet_end_access(int ref, void *page)
1431 {
1432         /* This frees the page as a side-effect */
1433         if (ref != GRANT_INVALID_REF)
1434                 gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1435 }
1436
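/* Tear down the per-queue IRQs, event channels and shared rings so that the
 * device can be reconnected or removed cleanly.
 */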
1437 static void xennet_disconnect_backend(struct netfront_info *info)
1438 {
1439         unsigned int i = 0;
1440         struct netfront_queue *queue = NULL;
1441         unsigned int num_queues = info->netdev->real_num_tx_queues;
1442
1443         for (i = 0; i < num_queues; ++i) {
                 queue = &info->queues[i];
1444                 /* Stop old i/f to prevent errors whilst we rebuild the state. */
1445                 spin_lock_bh(&queue->rx_lock);
1446                 spin_lock_irq(&queue->tx_lock);
1447                 netif_carrier_off(queue->info->netdev);
1448                 spin_unlock_irq(&queue->tx_lock);
1449                 spin_unlock_bh(&queue->rx_lock);
1450
1451                 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1452                         unbind_from_irqhandler(queue->tx_irq, queue);
1453                 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1454                         unbind_from_irqhandler(queue->tx_irq, queue);
1455                         unbind_from_irqhandler(queue->rx_irq, queue);
1456                 }
1457                 queue->tx_evtchn = queue->rx_evtchn = 0;
1458                 queue->tx_irq = queue->rx_irq = 0;
1459
1460                 /* End access and free the pages */
1461                 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1462                 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1463
1464                 queue->tx_ring_ref = GRANT_INVALID_REF;
1465                 queue->rx_ring_ref = GRANT_INVALID_REF;
1466                 queue->tx.sring = NULL;
1467                 queue->rx.sring = NULL;
1468         }
1469 }
1470
1471 /**
1472  * We are reconnecting to the backend, due to a suspend/resume, or a backend
1473  * driver restart.  We tear down our netif structure and recreate it, but
1474  * leave the device-layer structures intact so that this is transparent to the
1475  * rest of the kernel.
1476  */
1477 static int netfront_resume(struct xenbus_device *dev)
1478 {
1479         struct netfront_info *info = dev_get_drvdata(&dev->dev);
1480
1481         dev_dbg(&dev->dev, "%s\n", dev->nodename);
1482
1483         xennet_disconnect_backend(info);
1484         return 0;
1485 }
1486
1487 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1488 {
1489         char *s, *e, *macstr;
1490         int i;
1491
1492         macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1493         if (IS_ERR(macstr))
1494                 return PTR_ERR(macstr);
1495
1496         for (i = 0; i < ETH_ALEN; i++) {
1497                 mac[i] = simple_strtoul(s, &e, 16);
1498                 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1499                         kfree(macstr);
1500                         return -ENOENT;
1501                 }
1502                 s = e+1;
1503         }
1504
1505         kfree(macstr);
1506         return 0;
1507 }
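
     /*
      * The "mac" node parsed above holds six colon-separated hex octets.  An
      * illustrative (arbitrary) value, as it might be read from the frontend's
      * XenStore directory (the exact path depends on the toolstack, typically
      * /local/domain/<domid>/device/vif/<handle>/mac):
      *
      *     00:16:3e:5e:ac:21
      *
      * Anything that does not parse as exactly ETH_ALEN such groups makes the
      * function return -ENOENT.
      */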
1508
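     /*
      * The rings can be driven either by one event channel shared between TX
      * and RX (setup_netfront_single()) or by a dedicated channel per
      * direction (setup_netfront_split()).  Which variant is used depends on
      * whether the backend advertises feature-split-event-channels; with
      * split channels the TX and RX interrupts can be handled independently
      * of each other.
      */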
1509 static int setup_netfront_single(struct netfront_queue *queue)
1510 {
1511         int err;
1512
1513         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1514         if (err < 0)
1515                 goto fail;
1516
1517         err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1518                                         xennet_interrupt,
1519                                         0, queue->info->netdev->name, queue);
1520         if (err < 0)
1521                 goto bind_fail;
1522         queue->rx_evtchn = queue->tx_evtchn;
1523         queue->rx_irq = queue->tx_irq = err;
1524
1525         return 0;
1526
1527 bind_fail:
1528         xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1529         queue->tx_evtchn = 0;
1530 fail:
1531         return err;
1532 }
1533
1534 static int setup_netfront_split(struct netfront_queue *queue)
1535 {
1536         int err;
1537
1538         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1539         if (err < 0)
1540                 goto fail;
1541         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1542         if (err < 0)
1543                 goto alloc_rx_evtchn_fail;
1544
1545         snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1546                  "%s-tx", queue->name);
1547         err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1548                                         xennet_tx_interrupt,
1549                                         0, queue->tx_irq_name, queue);
1550         if (err < 0)
1551                 goto bind_tx_fail;
1552         queue->tx_irq = err;
1553
1554         snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1555                  "%s-rx", queue->name);
1556         err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
1557                                         xennet_rx_interrupt,
1558                                         0, queue->rx_irq_name, queue);
1559         if (err < 0)
1560                 goto bind_rx_fail;
1561         queue->rx_irq = err;
1562
1563         return 0;
1564
1565 bind_rx_fail:
1566         unbind_from_irqhandler(queue->tx_irq, queue);
1567         queue->tx_irq = 0;
1568 bind_tx_fail:
1569         xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1570         queue->rx_evtchn = 0;
1571 alloc_rx_evtchn_fail:
1572         xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1573         queue->tx_evtchn = 0;
1574 fail:
1575         return err;
1576 }
1577
1578 static int setup_netfront(struct xenbus_device *dev,
1579                         struct netfront_queue *queue, unsigned int feature_split_evtchn)
1580 {
1581         struct xen_netif_tx_sring *txs;
1582         struct xen_netif_rx_sring *rxs;
1583         int err;
1584
1585         queue->tx_ring_ref = GRANT_INVALID_REF;
1586         queue->rx_ring_ref = GRANT_INVALID_REF;
1587         queue->rx.sring = NULL;
1588         queue->tx.sring = NULL;
1589
1590         txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1591         if (!txs) {
1592                 err = -ENOMEM;
1593                 xenbus_dev_fatal(dev, err, "allocating tx ring page");
1594                 goto fail;
1595         }
1596         SHARED_RING_INIT(txs);
1597         FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE);
1598
1599         err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1600         if (err < 0)
1601                 goto grant_tx_ring_fail;
1602         queue->tx_ring_ref = err;
1603
1604         rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1605         if (!rxs) {
1606                 err = -ENOMEM;
1607                 xenbus_dev_fatal(dev, err, "allocating rx ring page");
1608                 goto alloc_rx_ring_fail;
1609         }
1610         SHARED_RING_INIT(rxs);
1611         FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
1612
1613         err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1614         if (err < 0)
1615                 goto grant_rx_ring_fail;
1616         queue->rx_ring_ref = err;
1617
1618         if (feature_split_evtchn)
1619                 err = setup_netfront_split(queue);
1620         /* Set up a single event channel if
1621          *  a) feature-split-event-channels == 0, or
1622          *  b) feature-split-event-channels == 1 but the split setup failed.
1623          */
1624         if (!feature_split_evtchn || err)
1625                 err = setup_netfront_single(queue);
1626
1627         if (err)
1628                 goto alloc_evtchn_fail;
1629
1630         return 0;
1631
1632         /* If we fail to setup netfront, it is safe to just revoke access to
1633          * granted pages because backend is not accessing it at this point.
1634          */
1635 alloc_evtchn_fail:
1636         gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
1637 grant_rx_ring_fail:
1638         free_page((unsigned long)rxs);
1639 alloc_rx_ring_fail:
1640         gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
1641 grant_tx_ring_fail:
1642         free_page((unsigned long)txs);
1643 fail:
1644         return err;
1645 }
1646
1647 /* Queue-specific initialisation
1648  * This used to be done in xennet_create_dev() but must now
1649  * be run per-queue.
1650  */
1651 static int xennet_init_queue(struct netfront_queue *queue)
1652 {
1653         unsigned short i;
1654         int err = 0;
1655
1656         spin_lock_init(&queue->tx_lock);
1657         spin_lock_init(&queue->rx_lock);
1658
1659         skb_queue_head_init(&queue->rx_batch);
1660         queue->rx_target     = RX_DFL_MIN_TARGET;
1661         queue->rx_min_target = RX_DFL_MIN_TARGET;
1662         queue->rx_max_target = RX_MAX_TARGET;
1663
1664         init_timer(&queue->rx_refill_timer);
1665         queue->rx_refill_timer.data = (unsigned long)queue;
1666         queue->rx_refill_timer.function = rx_refill_timeout;
1667
1668         snprintf(queue->name, sizeof(queue->name), "%s-q%u",
1669                  queue->info->netdev->name, queue->id);
1670
1671         /* Initialise tx_skbs as a free chain containing every entry. */
1672         queue->tx_skb_freelist = 0;
1673         for (i = 0; i < NET_TX_RING_SIZE; i++) {
1674                 skb_entry_set_link(&queue->tx_skbs[i], i+1);
1675                 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1676                 queue->grant_tx_page[i] = NULL;
1677         }
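             /*
              * The loop above chains every tx_skbs entry onto the free list:
              * tx_skb_freelist starts at 0, entry i links to i + 1, and the
              * final link points one past the end of the array as a
              * terminator.  The TX path pops and pushes ids on this chain as
              * slots are used and freed.
              */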
1678
1679         /* Clear out rx_skbs */
1680         for (i = 0; i < NET_RX_RING_SIZE; i++) {
1681                 queue->rx_skbs[i] = NULL;
1682                 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
1683         }
1684
1685         /* A grant for every tx ring slot */
1686         if (gnttab_alloc_grant_references(TX_MAX_TARGET,
1687                                           &queue->gref_tx_head) < 0) {
1688                 pr_alert("can't alloc tx grant refs\n");
1689                 err = -ENOMEM;
1690                 goto exit;
1691         }
1692
1693         /* A grant for every rx ring slot */
1694         if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1695                                           &queue->gref_rx_head) < 0) {
1696                 pr_alert("can't alloc rx grant refs\n");
1697                 err = -ENOMEM;
1698                 goto exit_free_tx;
1699         }
1700
1701         netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);
1702
1703         return 0;
1704
1705  exit_free_tx:
1706         gnttab_free_grant_references(queue->gref_tx_head);
1707  exit:
1708         return err;
1709 }
1710
1711 static int write_queue_xenstore_keys(struct netfront_queue *queue,
1712                            struct xenbus_transaction *xbt, int write_hierarchical)
1713 {
1714         /* Write the queue-specific keys into XenStore in the traditional
1715          * way for a single queue, or under per-queue subkeys when multiple
1716          * queues are in use.
1717          */
1718         struct xenbus_device *dev = queue->info->xbdev;
1719         int err;
1720         const char *message;
1721         char *path;
1722         size_t pathsize;
1723
1724         /* Choose the correct place to write the keys */
1725         if (write_hierarchical) {
1726                 pathsize = strlen(dev->nodename) + 10;
1727                 path = kzalloc(pathsize, GFP_KERNEL);
1728                 if (!path) {
1729                         err = -ENOMEM;
1730                         message = "out of memory while writing ring references";
1731                         goto error;
1732                 }
1733                 snprintf(path, pathsize, "%s/queue-%u",
1734                                 dev->nodename, queue->id);
1735         } else {
1736                 path = (char *)dev->nodename;
1737         }
1738
1739         /* Write ring references */
1740         err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
1741                         queue->tx_ring_ref);
1742         if (err) {
1743                 message = "writing tx-ring-ref";
1744                 goto error;
1745         }
1746
1747         err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
1748                         queue->rx_ring_ref);
1749         if (err) {
1750                 message = "writing rx-ring-ref";
1751                 goto error;
1752         }
1753
1754         /* Write the event channels, taking into account both the
1755          * shared and split event-channel scenarios.
1756          */
1757         if (queue->tx_evtchn == queue->rx_evtchn) {
1758                 /* Shared event channel */
1759                 err = xenbus_printf(*xbt, path,
1760                                 "event-channel", "%u", queue->tx_evtchn);
1761                 if (err) {
1762                         message = "writing event-channel";
1763                         goto error;
1764                 }
1765         } else {
1766                 /* Split event channels */
1767                 err = xenbus_printf(*xbt, path,
1768                                 "event-channel-tx", "%u", queue->tx_evtchn);
1769                 if (err) {
1770                         message = "writing event-channel-tx";
1771                         goto error;
1772                 }
1773
1774                 err = xenbus_printf(*xbt, path,
1775                                 "event-channel-rx", "%u", queue->rx_evtchn);
1776                 if (err) {
1777                         message = "writing event-channel-rx";
1778                         goto error;
1779                 }
1780         }
1781
1782         if (write_hierarchical)
1783                 kfree(path);
1784         return 0;
1785
1786 error:
1787         if (write_hierarchical)
1788                 kfree(path);
1789         xenbus_dev_fatal(dev, err, "%s", message);
1790         return err;
1791 }
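
     /*
      * With write_hierarchical set, the keys written above land in a per-queue
      * subdirectory of the frontend node, so a two-queue device ends up with a
      * layout roughly like (paths relative to dev->nodename, values
      * illustrative):
      *
      *     multi-queue-num-queues = "2"
      *     queue-0/tx-ring-ref, queue-0/rx-ring-ref
      *     queue-0/event-channel        (or event-channel-tx / event-channel-rx)
      *     queue-1/tx-ring-ref, ...
      *
      * In the flat (single-queue) case the same keys are written directly
      * under dev->nodename, which is what pre-multi-queue backends expect.
      */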
1792
1793 /* Common code used when first setting up, and when resuming. */
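     /*
      * All of the keys are written inside a single xenbus transaction.  If the
      * transaction clashes with a concurrent XenStore update,
      * xenbus_transaction_end() returns -EAGAIN and the whole set of writes is
      * retried from the "again" label; real failures abort the transaction and
      * tear the rings down via destroy_ring.
      */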
1794 static int talk_to_netback(struct xenbus_device *dev,
1795                            struct netfront_info *info)
1796 {
1797         const char *message;
1798         struct xenbus_transaction xbt;
1799         int err;
1800         unsigned int feature_split_evtchn;
1801         unsigned int i = 0;
1802         unsigned int max_queues = 0;
1803         struct netfront_queue *queue = NULL;
1804         unsigned int num_queues = 1;
1805
1806         info->netdev->irq = 0;
1807
1808         /* Check if backend supports multiple queues */
1809         err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1810                            "multi-queue-max-queues", "%u", &max_queues);
1811         if (err < 0)
1812                 max_queues = 1;
1813         num_queues = min(max_queues, xennet_max_queues);
1814
1815         /* Check feature-split-event-channels */
1816         err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1817                            "feature-split-event-channels", "%u",
1818                            &feature_split_evtchn);
1819         if (err < 0)
1820                 feature_split_evtchn = 0;
1821
1822         /* Read mac addr. */
1823         err = xen_net_read_mac(dev, info->netdev->dev_addr);
1824         if (err) {
1825                 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1826                 goto out;
1827         }
1828
1829         /* Allocate array of queues */
1830         info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL);
1831         if (!info->queues) {
1832                 err = -ENOMEM;
1833                 goto out;
1834         }
1835         rtnl_lock();
1836         netif_set_real_num_tx_queues(info->netdev, num_queues);
1837         rtnl_unlock();
1838
1839         /* Create shared ring, alloc event channel -- for each queue */
1840         for (i = 0; i < num_queues; ++i) {
1841                 queue = &info->queues[i];
1842                 queue->id = i;
1843                 queue->info = info;
1844                 err = xennet_init_queue(queue);
1845                 if (err) {
1846                         /* xennet_init_queue() cleans up after itself on failure,
1847                          * but we still have to clean up any previously initialised
1848                          * queues. If i > 0, shrink real_num_tx_queues to i, then
1849                          * goto destroy_ring, which calls xennet_disconnect_backend()
1850                          * to tidy up.
1851                          */
1852                         if (i > 0) {
1853                                 rtnl_lock();
1854                                 netif_set_real_num_tx_queues(info->netdev, i);
1855                                 rtnl_unlock();
1856                                 goto destroy_ring;
1857                         } else {
1858                                 goto out;
1859                         }
1860                 }
1861                 err = setup_netfront(dev, queue, feature_split_evtchn);
1862                 if (err) {
1863                         /* As for xennet_init_queue(), setup_netfront() will tidy
1864                          * up the current queue on error, but we need to clean up
1865                          * those already allocated.
1866                          */
1867                         if (i > 0) {
1868                                 rtnl_lock();
1869                                 netif_set_real_num_tx_queues(info->netdev, i);
1870                                 rtnl_unlock();
1871                                 goto destroy_ring;
1872                         } else {
1873                                 goto out;
1874                         }
1875                 }
1876         }
1877
1878 again:
1879         err = xenbus_transaction_start(&xbt);
1880         if (err) {
1881                 xenbus_dev_fatal(dev, err, "starting transaction");
1882                 goto destroy_ring;
1883         }
1884
1885         if (num_queues == 1) {
1886                 err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
1887                 if (err)
1888                         goto abort_transaction_no_dev_fatal;
1889         } else {
1890                 /* Write the number of queues */
1891                 err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues",
1892                                     "%u", num_queues);
1893                 if (err) {
1894                         message = "writing multi-queue-num-queues";
1895                         goto abort_transaction_no_dev_fatal;
1896                 }
1897
1898                 /* Write the keys for each queue */
1899                 for (i = 0; i < num_queues; ++i) {
1900                         queue = &info->queues[i];
1901                         err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
1902                         if (err)
1903                                 goto abort_transaction_no_dev_fatal;
1904                 }
1905         }
1906
1907         /* The remaining keys are not queue-specific */
1908         err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1909                             1);
1910         if (err) {
1911                 message = "writing request-rx-copy";
1912                 goto abort_transaction;
1913         }
1914
1915         err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1916         if (err) {
1917                 message = "writing feature-rx-notify";
1918                 goto abort_transaction;
1919         }
1920
1921         err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1922         if (err) {
1923                 message = "writing feature-sg";
1924                 goto abort_transaction;
1925         }
1926
1927         err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1928         if (err) {
1929                 message = "writing feature-gso-tcpv4";
1930                 goto abort_transaction;
1931         }
1932
1933         err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
1934         if (err) {
1935                 message = "writing feature-gso-tcpv6";
1936                 goto abort_transaction;
1937         }
1938
1939         err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
1940                            "1");
1941         if (err) {
1942                 message = "writing feature-ipv6-csum-offload";
1943                 goto abort_transaction;
1944         }
1945
1946         err = xenbus_transaction_end(xbt, 0);
1947         if (err) {
1948                 if (err == -EAGAIN)
1949                         goto again;
1950                 xenbus_dev_fatal(dev, err, "completing transaction");
1951                 goto destroy_ring;
1952         }
1953
1954         return 0;
1955
1956  abort_transaction:
1957         xenbus_dev_fatal(dev, err, "%s", message);
1958 abort_transaction_no_dev_fatal:
1959         xenbus_transaction_end(xbt, 1);
1960  destroy_ring:
1961         xennet_disconnect_backend(info);
1962         kfree(info->queues);
1963         info->queues = NULL;
1964         rtnl_lock();
1965         netif_set_real_num_tx_queues(info->netdev, 0);
1966         rtnl_unlock();
1967  out:
1968         return err;
1969 }
1970
1971 static int xennet_connect(struct net_device *dev)
1972 {
1973         struct netfront_info *np = netdev_priv(dev);
1974         unsigned int num_queues = 0;
1975         int i, requeue_idx, err;
1976         struct sk_buff *skb;
1977         grant_ref_t ref;
1978         struct xen_netif_rx_request *req;
1979         unsigned int feature_rx_copy;
1980         unsigned int j = 0;
1981         struct netfront_queue *queue = NULL;
1982
1983         err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1984                            "feature-rx-copy", "%u", &feature_rx_copy);
1985         if (err != 1)
1986                 feature_rx_copy = 0;
1987
1988         if (!feature_rx_copy) {
1989                 dev_info(&dev->dev,
1990                          "backend does not support copying receive path\n");
1991                 return -ENODEV;
1992         }
1993
1994         err = talk_to_netback(np->xbdev, np);
1995         if (err)
1996                 return err;
1997
1998         /* talk_to_netback() sets the correct number of queues */
1999         num_queues = dev->real_num_tx_queues;
2000
2001         rtnl_lock();
2002         netdev_update_features(dev);
2003         rtnl_unlock();
2004
2005         /* By now, the queue structures have been set up */
2006         for (j = 0; j < num_queues; ++j) {
2007                 queue = &np->queues[j];
2008                 spin_lock_bh(&queue->rx_lock);
2009                 spin_lock_irq(&queue->tx_lock);
2010
2011                 /* Step 1: Discard all pending TX packet fragments. */
2012                 xennet_release_tx_bufs(queue);
2013
2014                 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
2015                 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
2016                         skb_frag_t *frag;
2017                         const struct page *page;

2018                         if (!queue->rx_skbs[i])
2019                                 continue;
2020
2021                         skb = queue->rx_skbs[requeue_idx] = xennet_get_rx_skb(queue, i);
2022                         ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i);
2023                         req = RING_GET_REQUEST(&queue->rx, requeue_idx);
2024
2025                         frag = &skb_shinfo(skb)->frags[0];
2026                         page = skb_frag_page(frag);
2027                         gnttab_grant_foreign_access_ref(
2028                                 ref, queue->info->xbdev->otherend_id,
2029                                 pfn_to_mfn(page_to_pfn(page)),
2030                                 0);
2031                         req->gref = ref;
2032                         req->id   = requeue_idx;
2033
2034                         requeue_idx++;
2035                 }
2036
2037                 queue->rx.req_prod_pvt = requeue_idx;
2038         }
2039
2040         /*
2041          * Step 3: All public and private state should now be sane.  Get
2042          * ready to start sending and receiving packets and give the driver
2043          * domain a kick because we've probably just requeued some
2044          * packets.
2045          */
2046         netif_carrier_on(np->netdev);
2047         for (j = 0; j < num_queues; ++j) {
2048                 queue = &np->queues[j];
2049                 notify_remote_via_irq(queue->tx_irq);
2050                 if (queue->tx_irq != queue->rx_irq)
2051                         notify_remote_via_irq(queue->rx_irq);
2052                 xennet_tx_buf_gc(queue);
2053                 xennet_alloc_rx_buffers(queue);
2054
2055                 spin_unlock_irq(&queue->tx_lock);
2056                 spin_unlock_bh(&queue->rx_lock);
2057         }
2058
2059         return 0;
2060 }
2061
2062 /**
2063  * Callback received when the backend's state changes.
2064  */
2065 static void netback_changed(struct xenbus_device *dev,
2066                             enum xenbus_state backend_state)
2067 {
2068         struct netfront_info *np = dev_get_drvdata(&dev->dev);
2069         struct net_device *netdev = np->netdev;
2070
2071         dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2072
2073         switch (backend_state) {
2074         case XenbusStateInitialising:
2075         case XenbusStateInitialised:
2076         case XenbusStateReconfiguring:
2077         case XenbusStateReconfigured:
2078         case XenbusStateUnknown:
2079                 break;
2080
2081         case XenbusStateInitWait:
2082                 if (dev->state != XenbusStateInitialising)
2083                         break;
2084                 if (xennet_connect(netdev) != 0)
2085                         break;
2086                 xenbus_switch_state(dev, XenbusStateConnected);
2087                 break;
2088
2089         case XenbusStateConnected:
2090                 netdev_notify_peers(netdev);
2091                 break;
2092
2093         case XenbusStateClosed:
2094                 if (dev->state == XenbusStateClosed)
2095                         break;
2096                 /* Missed the backend's CLOSING state -- fallthrough */
2097         case XenbusStateClosing:
2098                 xenbus_frontend_closed(dev);
2099                 break;
2100         }
2101 }
2102
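     /*
      * Each entry below names an ethtool statistic together with its byte
      * offset inside struct netfront_info; xennet_get_ethtool_stats() reads
      * the counters generically through these offsets, so adding a statistic
      * only needs a new table entry.
      */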
2103 static const struct xennet_stat {
2104         char name[ETH_GSTRING_LEN];
2105         u16 offset;
2106 } xennet_stats[] = {
2107         {
2108                 "rx_gso_checksum_fixup",
2109                 offsetof(struct netfront_info, rx_gso_checksum_fixup)
2110         },
2111 };
2112
2113 static int xennet_get_sset_count(struct net_device *dev, int string_set)
2114 {
2115         switch (string_set) {
2116         case ETH_SS_STATS:
2117                 return ARRAY_SIZE(xennet_stats);
2118         default:
2119                 return -EINVAL;
2120         }
2121 }
2122
2123 static void xennet_get_ethtool_stats(struct net_device *dev,
2124                                      struct ethtool_stats *stats, u64 *data)
2125 {
2126         void *np = netdev_priv(dev);
2127         int i;
2128
2129         for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2130                 data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2131 }
2132
2133 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2134 {
2135         int i;
2136
2137         switch (stringset) {
2138         case ETH_SS_STATS:
2139                 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2140                         memcpy(data + i * ETH_GSTRING_LEN,
2141                                xennet_stats[i].name, ETH_GSTRING_LEN);
2142                 break;
2143         }
2144 }
2145
2146 static const struct ethtool_ops xennet_ethtool_ops =
2147 {
2148         .get_link = ethtool_op_get_link,
2149
2150         .get_sset_count = xennet_get_sset_count,
2151         .get_ethtool_stats = xennet_get_ethtool_stats,
2152         .get_strings = xennet_get_strings,
2153 };
2154
2155 #ifdef CONFIG_SYSFS
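     /*
      * rxbuf_min, rxbuf_max and rxbuf_cur are per-netdev sysfs attributes that
      * expose the RX buffer fill targets.  Writes are clamped to the
      * [RX_MIN_TARGET, RX_MAX_TARGET] range and applied to every queue; reads
      * report queue 0 on the assumption that all queues are configured alike.
      */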
2156 static ssize_t show_rxbuf_min(struct device *dev,
2157                               struct device_attribute *attr, char *buf)
2158 {
2159         struct net_device *netdev = to_net_dev(dev);
2160         struct netfront_info *info = netdev_priv(netdev);
2161         unsigned int num_queues = netdev->real_num_tx_queues;
2162
2163         if (num_queues)
2164                 return sprintf(buf, "%u\n", info->queues[0].rx_min_target);
2165         else
2166                 return sprintf(buf, "%u\n", RX_MIN_TARGET);
2167 }
2168
2169 static ssize_t store_rxbuf_min(struct device *dev,
2170                                struct device_attribute *attr,
2171                                const char *buf, size_t len)
2172 {
2173         struct net_device *netdev = to_net_dev(dev);
2174         struct netfront_info *np = netdev_priv(netdev);
2175         unsigned int num_queues = netdev->real_num_tx_queues;
2176         char *endp;
2177         unsigned long target;
2178         unsigned int i;
2179         struct netfront_queue *queue;
2180
2181         if (!capable(CAP_NET_ADMIN))
2182                 return -EPERM;
2183
2184         target = simple_strtoul(buf, &endp, 0);
2185         if (endp == buf)
2186                 return -EBADMSG;
2187
2188         if (target < RX_MIN_TARGET)
2189                 target = RX_MIN_TARGET;
2190         if (target > RX_MAX_TARGET)
2191                 target = RX_MAX_TARGET;
2192
2193         for (i = 0; i < num_queues; ++i) {
2194                 queue = &np->queues[i];
2195                 spin_lock_bh(&queue->rx_lock);
2196                 if (target > queue->rx_max_target)
2197                         queue->rx_max_target = target;
2198                 queue->rx_min_target = target;
2199                 if (target > queue->rx_target)
2200                         queue->rx_target = target;
2201
2202                 xennet_alloc_rx_buffers(queue);
2203
2204                 spin_unlock_bh(&queue->rx_lock);
2205         }
2206         return len;
2207 }
2208
2209 static ssize_t show_rxbuf_max(struct device *dev,
2210                               struct device_attribute *attr, char *buf)
2211 {
2212         struct net_device *netdev = to_net_dev(dev);
2213         struct netfront_info *info = netdev_priv(netdev);
2214         unsigned int num_queues = netdev->real_num_tx_queues;
2215
2216         if (num_queues)
2217                 return sprintf(buf, "%u\n", info->queues[0].rx_max_target);
2218         else
2219                 return sprintf(buf, "%u\n", RX_MAX_TARGET);
2220 }
2221
2222 static ssize_t store_rxbuf_max(struct device *dev,
2223                                struct device_attribute *attr,
2224                                const char *buf, size_t len)
2225 {
2226         struct net_device *netdev = to_net_dev(dev);
2227         struct netfront_info *np = netdev_priv(netdev);
2228         unsigned int num_queues = netdev->real_num_tx_queues;
2229         char *endp;
2230         unsigned long target;
2231         unsigned int i = 0;
2232         struct netfront_queue *queue = NULL;
2233
2234         if (!capable(CAP_NET_ADMIN))
2235                 return -EPERM;
2236
2237         target = simple_strtoul(buf, &endp, 0);
2238         if (endp == buf)
2239                 return -EBADMSG;
2240
2241         if (target < RX_MIN_TARGET)
2242                 target = RX_MIN_TARGET;
2243         if (target > RX_MAX_TARGET)
2244                 target = RX_MAX_TARGET;
2245
2246         for (i = 0; i < num_queues; ++i) {
2247                 queue = &np->queues[i];
2248                 spin_lock_bh(&queue->rx_lock);
2249                 if (target < queue->rx_min_target)
2250                         queue->rx_min_target = target;
2251                 queue->rx_max_target = target;
2252                 if (target < queue->rx_target)
2253                         queue->rx_target = target;
2254
2255                 xennet_alloc_rx_buffers(queue);
2256
2257                 spin_unlock_bh(&queue->rx_lock);
2258         }
2259         return len;
2260 }
2261
2262 static ssize_t show_rxbuf_cur(struct device *dev,
2263                               struct device_attribute *attr, char *buf)
2264 {
2265         struct net_device *netdev = to_net_dev(dev);
2266         struct netfront_info *info = netdev_priv(netdev);
2267         unsigned int num_queues = netdev->real_num_tx_queues;
2268
2269         if (num_queues)
2270                 return sprintf(buf, "%u\n", info->queues[0].rx_target);
2271         else
2272                 return sprintf(buf, "0\n");
2273 }
2274
2275 static struct device_attribute xennet_attrs[] = {
2276         __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
2277         __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
2278         __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
2279 };
2280
2281 static int xennet_sysfs_addif(struct net_device *netdev)
2282 {
2283         int i;
2284         int err;
2285
2286         for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
2287                 err = device_create_file(&netdev->dev,
2288                                            &xennet_attrs[i]);
2289                 if (err)
2290                         goto fail;
2291         }
2292         return 0;
2293
2294  fail:
2295         while (--i >= 0)
2296                 device_remove_file(&netdev->dev, &xennet_attrs[i]);
2297         return err;
2298 }
2299
2300 static void xennet_sysfs_delif(struct net_device *netdev)
2301 {
2302         int i;
2303
2304         for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
2305                 device_remove_file(&netdev->dev, &xennet_attrs[i]);
2306 }
2307
2308 #endif /* CONFIG_SYSFS */
2309
2310 static const struct xenbus_device_id netfront_ids[] = {
2311         { "vif" },
2312         { "" }
2313 };
2314
2315
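     /*
      * Tear-down mirrors the probe/connect path: revoke the grants and unbind
      * the interrupts first (xennet_disconnect_backend()), drop the sysfs
      * attributes and the net_device registration, and only then stop the
      * per-queue refill timers and free the queue array, statistics and
      * netdev.
      */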
2316 static int xennet_remove(struct xenbus_device *dev)
2317 {
2318         struct netfront_info *info = dev_get_drvdata(&dev->dev);
2319         unsigned int num_queues = info->netdev->real_num_tx_queues;
2320         struct netfront_queue *queue = NULL;
2321         unsigned int i = 0;
2322
2323         dev_dbg(&dev->dev, "%s\n", dev->nodename);
2324
2325         xennet_disconnect_backend(info);
2326
2327         xennet_sysfs_delif(info->netdev);
2328
2329         unregister_netdev(info->netdev);
2330
2331         for (i = 0; i < num_queues; ++i) {
2332                 queue = &info->queues[i];
2333                 del_timer_sync(&queue->rx_refill_timer);
2334         }
2335
2336         if (num_queues) {
2337                 kfree(info->queues);
2338                 info->queues = NULL;
2339         }
2340
2341         free_percpu(info->stats);
2342
2343         free_netdev(info->netdev);
2344
2345         return 0;
2346 }
2347
2348 static DEFINE_XENBUS_DRIVER(netfront, ,
2349         .probe = netfront_probe,
2350         .remove = xennet_remove,
2351         .resume = netfront_resume,
2352         .otherend_changed = netback_changed,
2353 );
2354
2355 static int __init netif_init(void)
2356 {
2357         if (!xen_domain())
2358                 return -ENODEV;
2359
2360         if (!xen_has_pv_nic_devices())
2361                 return -ENODEV;
2362
2363         pr_info("Initialising Xen virtual ethernet driver\n");
2364
2365         /* Default to one queue per online CPU, unless the max_queues
              * module parameter was given.
              */
2366         if (xennet_max_queues == 0)
                     xennet_max_queues = num_online_cpus();
2367
2368         return xenbus_register_frontend(&netfront_driver);
2369 }
2370 module_init(netif_init);
2371
2372
2373 static void __exit netif_exit(void)
2374 {
2375         xenbus_unregister_driver(&netfront_driver);
2376 }
2377 module_exit(netif_exit);
2378
2379 MODULE_DESCRIPTION("Xen virtual network device frontend");
2380 MODULE_LICENSE("GPL");
2381 MODULE_ALIAS("xen:vif");
2382 MODULE_ALIAS("xennet");