drivers/net/virtio_net.c
1 /* A network driver using virtio.
2  *
3  * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18  */
19 //#define DEBUG
20 #include <linux/netdevice.h>
21 #include <linux/etherdevice.h>
22 #include <linux/ethtool.h>
23 #include <linux/module.h>
24 #include <linux/virtio.h>
25 #include <linux/virtio_net.h>
26 #include <linux/scatterlist.h>
27 #include <linux/if_vlan.h>
28 #include <linux/slab.h>
29 #include <linux/cpu.h>
30
31 static int napi_weight = NAPI_POLL_WEIGHT;
32 module_param(napi_weight, int, 0444);
33
34 static bool csum = true, gso = true;
35 module_param(csum, bool, 0444);
36 module_param(gso, bool, 0444);
37
38 /* FIXME: MTU in config. */
39 #define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
40 #define GOOD_COPY_LEN   128
41
42 #define VIRTNET_DRIVER_VERSION "1.0.0"
43
44 struct virtnet_stats {
45         struct u64_stats_sync tx_syncp;
46         struct u64_stats_sync rx_syncp;
47         u64 tx_bytes;
48         u64 tx_packets;
49
50         u64 rx_bytes;
51         u64 rx_packets;
52 };
53
54 /* Internal representation of a send virtqueue */
55 struct send_queue {
56         /* Virtqueue associated with this send_queue */
57         struct virtqueue *vq;
58
59         /* TX: fragments + linear part + virtio header */
60         struct scatterlist sg[MAX_SKB_FRAGS + 2];
61
62         /* Name of the send queue: output.$index */
63         char name[40];
64 };
65
66 /* Internal representation of a receive virtqueue */
67 struct receive_queue {
68         /* Virtqueue associated with this receive_queue */
69         struct virtqueue *vq;
70
71         struct napi_struct napi;
72
73         /* Number of input buffers, and max we've ever had. */
74         unsigned int num, max;
75
76         /* Chain pages by the private ptr. */
77         struct page *pages;
78
79         /* RX: fragments + linear part + virtio header */
80         struct scatterlist sg[MAX_SKB_FRAGS + 2];
81
82         /* Name of this receive queue: input.$index */
83         char name[40];
84 };
85
86 struct virtnet_info {
87         struct virtio_device *vdev;
88         struct virtqueue *cvq;
89         struct net_device *dev;
90         struct send_queue *sq;
91         struct receive_queue *rq;
92         unsigned int status;
93
94         /* Max # of queue pairs supported by the device */
95         u16 max_queue_pairs;
96
97         /* # of queue pairs currently used by the driver */
98         u16 curr_queue_pairs;
99
100         /* I like... big packets and I cannot lie! */
101         bool big_packets;
102
103         /* Host will merge rx buffers for big packets (shake it! shake it!) */
104         bool mergeable_rx_bufs;
105
106         /* Has control virtqueue */
107         bool has_cvq;
108
109         /* Host can handle any s/g split between our header and packet data */
110         bool any_header_sg;
111
112         /* enable config space updates */
113         bool config_enable;
114
115         /* Active statistics */
116         struct virtnet_stats __percpu *stats;
117
118         /* Work struct for refilling if we run low on memory. */
119         struct delayed_work refill;
120
121         /* Work struct for config space updates */
122         struct work_struct config_work;
123
124         /* Lock for config space updates */
125         struct mutex config_lock;
126
127         /* Page_frag for GFP_KERNEL packet buffer allocation when we run
128          * low on memory.
129          */
130         struct page_frag alloc_frag;
131
132         /* Is the affinity hint set for virtqueues? */
133         bool affinity_hint_set;
134
135         /* Per-cpu variable to show the mapping from CPU to virtqueue */
136         int __percpu *vq_index;
137
138         /* CPU hot plug notifier */
139         struct notifier_block nb;
140 };
141
142 struct skb_vnet_hdr {
143         union {
144                 struct virtio_net_hdr hdr;
145                 struct virtio_net_hdr_mrg_rxbuf mhdr;
146         };
147 };
148
149 struct padded_vnet_hdr {
150         struct virtio_net_hdr hdr;
151         /*
152          * virtio_net_hdr should be in a separate sg buffer because of a
153          * QEMU bug, and the data sg buffer shares a page with this header sg.
154          * This padding makes the next sg 16-byte aligned after virtio_net_hdr.
155          */
156         char padding[6];
157 };
158
159 /* Converting between virtqueue no. and kernel tx/rx queue no.
160  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
161  */
162 static int vq2txq(struct virtqueue *vq)
163 {
164         return (vq->index - 1) / 2;
165 }
166
167 static int txq2vq(int txq)
168 {
169         return txq * 2 + 1;
170 }
171
172 static int vq2rxq(struct virtqueue *vq)
173 {
174         return vq->index / 2;
175 }
176
177 static int rxq2vq(int rxq)
178 {
179         return rxq * 2;
180 }
181
182 static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
183 {
184         return (struct skb_vnet_hdr *)skb->cb;
185 }
186
187 /*
188  * private is used to chain pages for big packets; put the whole
189  * most recently used list at the front for reuse
190  */
191 static void give_pages(struct receive_queue *rq, struct page *page)
192 {
193         struct page *end;
194
195         /* Find end of list, sew whole thing into vi->rq.pages. */
196         for (end = page; end->private; end = (struct page *)end->private);
197         end->private = (unsigned long)rq->pages;
198         rq->pages = page;
199 }
200
201 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
202 {
203         struct page *p = rq->pages;
204
205         if (p) {
206                 rq->pages = (struct page *)p->private;
207                 /* clear private here, it is used to chain pages */
208                 p->private = 0;
209         } else
210                 p = alloc_page(gfp_mask);
211         return p;
212 }
213
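/* TX virtqueue callback: the host has consumed some output buffers, so
 * suppress further callbacks and wake the matching tx queue.
 */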
214 static void skb_xmit_done(struct virtqueue *vq)
215 {
216         struct virtnet_info *vi = vq->vdev->priv;
217
218         /* Suppress further interrupts. */
219         virtqueue_disable_cb(vq);
220
221         /* We were probably waiting for more output buffers. */
222         netif_wake_subqueue(vi->dev, vq2txq(vq));
223 }
224
225 /* Called from bottom half context */
226 static struct sk_buff *page_to_skb(struct receive_queue *rq,
227                                    struct page *page, unsigned int offset,
228                                    unsigned int len, unsigned int truesize)
229 {
230         struct virtnet_info *vi = rq->vq->vdev->priv;
231         struct sk_buff *skb;
232         struct skb_vnet_hdr *hdr;
233         unsigned int copy, hdr_len, hdr_padded_len;
234         char *p;
235
236         p = page_address(page) + offset;
237
238         /* copy small packet so we can reuse these pages for small data */
239         skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
240         if (unlikely(!skb))
241                 return NULL;
242
243         hdr = skb_vnet_hdr(skb);
244
245         if (vi->mergeable_rx_bufs) {
246                 hdr_len = sizeof hdr->mhdr;
247                 hdr_padded_len = sizeof hdr->mhdr;
248         } else {
249                 hdr_len = sizeof hdr->hdr;
250                 hdr_padded_len = sizeof(struct padded_vnet_hdr);
251         }
252
253         memcpy(hdr, p, hdr_len);
254
255         len -= hdr_len;
256         offset += hdr_padded_len;
257         p += hdr_padded_len;
258
259         copy = len;
260         if (copy > skb_tailroom(skb))
261                 copy = skb_tailroom(skb);
262         memcpy(skb_put(skb, copy), p, copy);
263
264         len -= copy;
265         offset += copy;
266
267         if (vi->mergeable_rx_bufs) {
268                 if (len)
269                         skb_add_rx_frag(skb, 0, page, offset, len, truesize);
270                 else
271                         put_page(page);
272                 return skb;
273         }
274
275         /*
276          * Verify that we can indeed put this data into a skb.
277          * This is here to handle cases when the device erroneously
278          * tries to receive more than is possible. This is usually
279          * the case of a broken device.
280          */
281         if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
282                 net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
283                 dev_kfree_skb(skb);
284                 return NULL;
285         }
286         BUG_ON(offset >= PAGE_SIZE);
287         while (len) {
288                 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
289                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
290                                 frag_size, truesize);
291                 len -= frag_size;
292                 page = (struct page *)page->private;
293                 offset = 0;
294         }
295
296         if (page)
297                 give_pages(rq, page);
298
299         return skb;
300 }
301
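/* Gather the remaining buffers of a merged-rx-buffer packet into frags of
 * head_skb (chaining extra skbs onto the frag_list when frags run out).
 */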
302 static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
303 {
304         struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
305         struct sk_buff *curr_skb = head_skb;
306         char *buf;
307         struct page *page;
308         int num_buf, len;
309
310         num_buf = hdr->mhdr.num_buffers;
311         while (--num_buf) {
312                 int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
313                 buf = virtqueue_get_buf(rq->vq, &len);
314                 if (unlikely(!buf)) {
315                         pr_debug("%s: rx error: %d buffers missing\n",
316                                  head_skb->dev->name, hdr->mhdr.num_buffers);
317                         head_skb->dev->stats.rx_length_errors++;
318                         return -EINVAL;
319                 }
320                 if (unlikely(len > MAX_PACKET_LEN)) {
321                         pr_debug("%s: rx error: merge buffer too long\n",
322                                  head_skb->dev->name);
323                         len = MAX_PACKET_LEN;
324                 }
325                 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
326                         struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
327                         if (unlikely(!nskb)) {
328                                 head_skb->dev->stats.rx_dropped++;
329                                 return -ENOMEM;
330                         }
331                         if (curr_skb == head_skb)
332                                 skb_shinfo(curr_skb)->frag_list = nskb;
333                         else
334                                 curr_skb->next = nskb;
335                         curr_skb = nskb;
336                         head_skb->truesize += nskb->truesize;
337                         num_skb_frags = 0;
338                 }
339                 if (curr_skb != head_skb) {
340                         head_skb->data_len += len;
341                         head_skb->len += len;
342                         head_skb->truesize += MAX_PACKET_LEN;
343                 }
344                 page = virt_to_head_page(buf);
345                 skb_add_rx_frag(curr_skb, num_skb_frags, page,
346                                 buf - (char *)page_address(page), len,
347                                 MAX_PACKET_LEN);
348                 --rq->num;
349         }
350         return 0;
351 }
352
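/* Handle one completed receive buffer: build the skb, apply checksum/GSO
 * hints from the virtio_net header and hand the packet to the stack.
 */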
353 static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
354 {
355         struct virtnet_info *vi = rq->vq->vdev->priv;
356         struct net_device *dev = vi->dev;
357         struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
358         struct sk_buff *skb;
359         struct page *page;
360         struct skb_vnet_hdr *hdr;
361
362         if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
363                 pr_debug("%s: short packet %i\n", dev->name, len);
364                 dev->stats.rx_length_errors++;
365                 if (vi->big_packets)
366                         give_pages(rq, buf);
367                 else if (vi->mergeable_rx_bufs)
368                         put_page(virt_to_head_page(buf));
369                 else
370                         dev_kfree_skb(buf);
371                 return;
372         }
373
374         if (!vi->mergeable_rx_bufs && !vi->big_packets) {
375                 skb = buf;
376                 len -= sizeof(struct virtio_net_hdr);
377                 skb_trim(skb, len);
378         } else if (vi->mergeable_rx_bufs) {
379                 struct page *page = virt_to_head_page(buf);
380                 skb = page_to_skb(rq, page,
381                                   (char *)buf - (char *)page_address(page),
382                                   len, MAX_PACKET_LEN);
383                 if (unlikely(!skb)) {
384                         dev->stats.rx_dropped++;
385                         put_page(page);
386                         return;
387                 }
388                 if (receive_mergeable(rq, skb)) {
389                         dev_kfree_skb(skb);
390                         return;
391                 }
392         } else {
393                 page = buf;
394                 skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
395                 if (unlikely(!skb)) {
396                         dev->stats.rx_dropped++;
397                         give_pages(rq, page);
398                         return;
399                 }
400         }
401
402         hdr = skb_vnet_hdr(skb);
403
404         u64_stats_update_begin(&stats->rx_syncp);
405         stats->rx_bytes += skb->len;
406         stats->rx_packets++;
407         u64_stats_update_end(&stats->rx_syncp);
408
409         if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
410                 pr_debug("Needs csum!\n");
411                 if (!skb_partial_csum_set(skb,
412                                           hdr->hdr.csum_start,
413                                           hdr->hdr.csum_offset))
414                         goto frame_err;
415         } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
416                 skb->ip_summed = CHECKSUM_UNNECESSARY;
417         }
418
419         skb->protocol = eth_type_trans(skb, dev);
420         pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
421                  ntohs(skb->protocol), skb->len, skb->pkt_type);
422
423         if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
424                 pr_debug("GSO!\n");
425                 switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
426                 case VIRTIO_NET_HDR_GSO_TCPV4:
427                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
428                         break;
429                 case VIRTIO_NET_HDR_GSO_UDP:
430                         skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
431                         break;
432                 case VIRTIO_NET_HDR_GSO_TCPV6:
433                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
434                         break;
435                 default:
436                         net_warn_ratelimited("%s: bad gso type %u.\n",
437                                              dev->name, hdr->hdr.gso_type);
438                         goto frame_err;
439                 }
440
441                 if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
442                         skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
443
444                 skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
445                 if (skb_shinfo(skb)->gso_size == 0) {
446                         net_warn_ratelimited("%s: zero gso size.\n", dev->name);
447                         goto frame_err;
448                 }
449
450                 /* Header must be checked, and gso_segs computed. */
451                 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
452                 skb_shinfo(skb)->gso_segs = 0;
453         }
454
455         netif_receive_skb(skb);
456         return;
457
458 frame_err:
459         dev->stats.rx_frame_errors++;
460         dev_kfree_skb(skb);
461 }
462
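/* Post a single skb-sized receive buffer (neither mergeable nor big-packet
 * mode).
 */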
463 static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
464 {
465         struct virtnet_info *vi = rq->vq->vdev->priv;
466         struct sk_buff *skb;
467         struct skb_vnet_hdr *hdr;
468         int err;
469
470         skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
471         if (unlikely(!skb))
472                 return -ENOMEM;
473
474         skb_put(skb, MAX_PACKET_LEN);
475
476         hdr = skb_vnet_hdr(skb);
477         sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);
478
479         skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
480
481         err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
482         if (err < 0)
483                 dev_kfree_skb(skb);
484
485         return err;
486 }
487
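/* Post a chain of pages large enough for a maximum-size (GSO) packet. */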
488 static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
489 {
490         struct page *first, *list = NULL;
491         char *p;
492         int i, err, offset;
493
494         /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
495         for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
496                 first = get_a_page(rq, gfp);
497                 if (!first) {
498                         if (list)
499                                 give_pages(rq, list);
500                         return -ENOMEM;
501                 }
502                 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
503
504                 /* chain new page in list head to match sg */
505                 first->private = (unsigned long)list;
506                 list = first;
507         }
508
509         first = get_a_page(rq, gfp);
510         if (!first) {
511                 give_pages(rq, list);
512                 return -ENOMEM;
513         }
514         p = page_address(first);
515
516         /* rq->sg[0], rq->sg[1] share the same page */
517         /* a separate rq->sg[0] for virtio_net_hdr only due to a QEMU bug */
518         sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));
519
520         /* rq->sg[1] for data packet, from offset */
521         offset = sizeof(struct padded_vnet_hdr);
522         sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
523
524         /* chain first in list head */
525         first->private = (unsigned long)list;
526         err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
527                                   first, gfp);
528         if (err < 0)
529                 give_pages(rq, first);
530
531         return err;
532 }
533
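/* Post one MAX_PACKET_LEN fragment; the host may merge several such buffers
 * into a single packet.
 */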
534 static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
535 {
536         struct virtnet_info *vi = rq->vq->vdev->priv;
537         char *buf = NULL;
538         int err;
539
540         if (gfp & __GFP_WAIT) {
541                 if (skb_page_frag_refill(MAX_PACKET_LEN, &vi->alloc_frag,
542                                          gfp)) {
543                         buf = (char *)page_address(vi->alloc_frag.page) +
544                               vi->alloc_frag.offset;
545                         get_page(vi->alloc_frag.page);
546                         vi->alloc_frag.offset += MAX_PACKET_LEN;
547                 }
548         } else {
549                 buf = netdev_alloc_frag(MAX_PACKET_LEN);
550         }
551         if (!buf)
552                 return -ENOMEM;
553
554         sg_init_one(rq->sg, buf, MAX_PACKET_LEN);
555         err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
556         if (err < 0)
557                 put_page(virt_to_head_page(buf));
558
559         return err;
560 }
561
562 /*
563  * Returns false if we couldn't fill entirely (OOM).
564  *
565  * Normally run in the receive path, but can also be run from ndo_open
566  * before we're receiving packets, or from refill_work which is
567  * careful to disable receiving (using napi_disable).
568  */
569 static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
570 {
571         struct virtnet_info *vi = rq->vq->vdev->priv;
572         int err;
573         bool oom;
574
575         do {
576                 if (vi->mergeable_rx_bufs)
577                         err = add_recvbuf_mergeable(rq, gfp);
578                 else if (vi->big_packets)
579                         err = add_recvbuf_big(rq, gfp);
580                 else
581                         err = add_recvbuf_small(rq, gfp);
582
583                 oom = err == -ENOMEM;
584                 if (err)
585                         break;
586                 ++rq->num;
587         } while (rq->vq->num_free);
588         if (unlikely(rq->num > rq->max))
589                 rq->max = rq->num;
590         virtqueue_kick(rq->vq);
591         return !oom;
592 }
593
594 static void skb_recv_done(struct virtqueue *rvq)
595 {
596         struct virtnet_info *vi = rvq->vdev->priv;
597         struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
598
599         /* Schedule NAPI; suppress further interrupts if successful. */
600         if (napi_schedule_prep(&rq->napi)) {
601                 virtqueue_disable_cb(rvq);
602                 __napi_schedule(&rq->napi);
603         }
604 }
605
606 static void virtnet_napi_enable(struct receive_queue *rq)
607 {
608         napi_enable(&rq->napi);
609
610         /* If all buffers were filled by the other side before we enabled napi,
611          * we won't get another interrupt, so process any outstanding packets
612          * now.  virtnet_poll wants to re-enable the queue, so we disable it here.
613          * We synchronize against interrupts via NAPI_STATE_SCHED. */
614         if (napi_schedule_prep(&rq->napi)) {
615                 virtqueue_disable_cb(rq->vq);
616                 local_bh_disable();
617                 __napi_schedule(&rq->napi);
618                 local_bh_enable();
619         }
620 }
621
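/* Deferred refill of the receive queues, scheduled when an atomic buffer
 * allocation failed.
 */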
622 static void refill_work(struct work_struct *work)
623 {
624         struct virtnet_info *vi =
625                 container_of(work, struct virtnet_info, refill.work);
626         bool still_empty;
627         int i;
628
629         for (i = 0; i < vi->curr_queue_pairs; i++) {
630                 struct receive_queue *rq = &vi->rq[i];
631
632                 napi_disable(&rq->napi);
633                 still_empty = !try_fill_recv(rq, GFP_KERNEL);
634                 virtnet_napi_enable(rq);
635
636                 /* In theory, this can happen: if we don't get any buffers in,
637                  * we will *never* try to fill again.
638                  */
639                 if (still_empty)
640                         schedule_delayed_work(&vi->refill, HZ/2);
641         }
642 }
643
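/* NAPI poll handler: process received buffers, refill the queue if it runs
 * low, and re-enable virtqueue callbacks once the budget is not exhausted.
 */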
644 static int virtnet_poll(struct napi_struct *napi, int budget)
645 {
646         struct receive_queue *rq =
647                 container_of(napi, struct receive_queue, napi);
648         struct virtnet_info *vi = rq->vq->vdev->priv;
649         void *buf;
650         unsigned int r, len, received = 0;
651
652 again:
653         while (received < budget &&
654                (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
655                 receive_buf(rq, buf, len);
656                 --rq->num;
657                 received++;
658         }
659
660         if (rq->num < rq->max / 2) {
661                 if (!try_fill_recv(rq, GFP_ATOMIC))
662                         schedule_delayed_work(&vi->refill, 0);
663         }
664
665         /* Out of packets? */
666         if (received < budget) {
667                 r = virtqueue_enable_cb_prepare(rq->vq);
668                 napi_complete(napi);
669                 if (unlikely(virtqueue_poll(rq->vq, r)) &&
670                     napi_schedule_prep(napi)) {
671                         virtqueue_disable_cb(rq->vq);
672                         __napi_schedule(napi);
673                         goto again;
674                 }
675         }
676
677         return received;
678 }
679
680 static int virtnet_open(struct net_device *dev)
681 {
682         struct virtnet_info *vi = netdev_priv(dev);
683         int i;
684
685         for (i = 0; i < vi->max_queue_pairs; i++) {
686                 if (i < vi->curr_queue_pairs)
687                         /* Make sure we have some buffers: if oom use wq. */
688                         if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
689                                 schedule_delayed_work(&vi->refill, 0);
690                 virtnet_napi_enable(&vi->rq[i]);
691         }
692
693         return 0;
694 }
695
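/* Reclaim transmitted skbs from the send virtqueue and update tx statistics. */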
696 static void free_old_xmit_skbs(struct send_queue *sq)
697 {
698         struct sk_buff *skb;
699         unsigned int len;
700         struct virtnet_info *vi = sq->vq->vdev->priv;
701         struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
702
703         while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
704                 pr_debug("Sent skb %p\n", skb);
705
706                 u64_stats_update_begin(&stats->tx_syncp);
707                 stats->tx_bytes += skb->len;
708                 stats->tx_packets++;
709                 u64_stats_update_end(&stats->tx_syncp);
710
711                 dev_kfree_skb_any(skb);
712         }
713 }
714
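/* Fill in the virtio_net header for this skb and add it to the send
 * virtqueue.
 */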
715 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
716 {
717         struct skb_vnet_hdr *hdr;
718         const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
719         struct virtnet_info *vi = sq->vq->vdev->priv;
720         unsigned num_sg;
721         unsigned hdr_len;
722         bool can_push;
723
724         pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
725         if (vi->mergeable_rx_bufs)
726                 hdr_len = sizeof hdr->mhdr;
727         else
728                 hdr_len = sizeof hdr->hdr;
729
730         can_push = vi->any_header_sg &&
731                 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
732                 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
733         /* Even if we can, don't push here yet as this would skew
734          * csum_start offset below. */
735         if (can_push)
736                 hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len);
737         else
738                 hdr = skb_vnet_hdr(skb);
739
740         if (skb->ip_summed == CHECKSUM_PARTIAL) {
741                 hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
742                 hdr->hdr.csum_start = skb_checksum_start_offset(skb);
743                 hdr->hdr.csum_offset = skb->csum_offset;
744         } else {
745                 hdr->hdr.flags = 0;
746                 hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
747         }
748
749         if (skb_is_gso(skb)) {
750                 hdr->hdr.hdr_len = skb_headlen(skb);
751                 hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
752                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
753                         hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
754                 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
755                         hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
756                 else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
757                         hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
758                 else
759                         BUG();
760                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
761                         hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
762         } else {
763                 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
764                 hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
765         }
766
767         if (vi->mergeable_rx_bufs)
768                 hdr->mhdr.num_buffers = 0;
769
770         if (can_push) {
771                 __skb_push(skb, hdr_len);
772                 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
773                 /* Pull header back to avoid skew in tx bytes calculations. */
774                 __skb_pull(skb, hdr_len);
775         } else {
776                 sg_set_buf(sq->sg, hdr, hdr_len);
777                 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
778         }
779         return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
780 }
781
782 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
783 {
784         struct virtnet_info *vi = netdev_priv(dev);
785         int qnum = skb_get_queue_mapping(skb);
786         struct send_queue *sq = &vi->sq[qnum];
787         int err;
788
789         /* Free up any pending old buffers before queueing new ones. */
790         free_old_xmit_skbs(sq);
791
792         /* Try to transmit */
793         err = xmit_skb(sq, skb);
794
795         /* This should not happen! */
796         if (unlikely(err)) {
797                 dev->stats.tx_fifo_errors++;
798                 if (net_ratelimit())
799                         dev_warn(&dev->dev,
800                                  "Unexpected TX queue (%d) failure: %d\n", qnum, err);
801                 dev->stats.tx_dropped++;
802                 kfree_skb(skb);
803                 return NETDEV_TX_OK;
804         }
805         virtqueue_kick(sq->vq);
806
807         /* Don't wait up for transmitted skbs to be freed. */
808         skb_orphan(skb);
809         nf_reset(skb);
810
811         /* Apparently nice girls don't return TX_BUSY; stop the queue
812          * before it gets out of hand.  Naturally, this wastes entries. */
813         if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
814                 netif_stop_subqueue(dev, qnum);
815                 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
816                         /* More just got used, free them then recheck. */
817                         free_old_xmit_skbs(sq);
818                         if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
819                                 netif_start_subqueue(dev, qnum);
820                                 virtqueue_disable_cb(sq->vq);
821                         }
822                 }
823         }
824
825         return NETDEV_TX_OK;
826 }
827
828 /*
829  * Send command via the control virtqueue and check status.  Commands
830  * supported by the hypervisor, as indicated by feature bits, should
831  * never fail unless improperly formatted.
832  */
833 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
834                                  struct scatterlist *out,
835                                  struct scatterlist *in)
836 {
837         struct scatterlist *sgs[4], hdr, stat;
838         struct virtio_net_ctrl_hdr ctrl;
839         virtio_net_ctrl_ack status = ~0;
840         unsigned out_num = 0, in_num = 0, tmp;
841
842         /* Caller should know better */
843         BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
844
845         ctrl.class = class;
846         ctrl.cmd = cmd;
847         /* Add header */
848         sg_init_one(&hdr, &ctrl, sizeof(ctrl));
849         sgs[out_num++] = &hdr;
850
851         if (out)
852                 sgs[out_num++] = out;
853         if (in)
854                 sgs[out_num + in_num++] = in;
855
856         /* Add return status. */
857         sg_init_one(&stat, &status, sizeof(status));
858         sgs[out_num + in_num++] = &stat;
859
860         BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
861         BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
862                < 0);
863
864         virtqueue_kick(vi->cvq);
865
866         /* Spin for a response, the kick causes an ioport write, trapping
867          * into the hypervisor, so the request should be handled immediately.
868          */
869         while (!virtqueue_get_buf(vi->cvq, &tmp))
870                 cpu_relax();
871
872         return status == VIRTIO_NET_OK;
873 }
874
875 static int virtnet_set_mac_address(struct net_device *dev, void *p)
876 {
877         struct virtnet_info *vi = netdev_priv(dev);
878         struct virtio_device *vdev = vi->vdev;
879         int ret;
880         struct sockaddr *addr = p;
881         struct scatterlist sg;
882
883         ret = eth_prepare_mac_addr_change(dev, p);
884         if (ret)
885                 return ret;
886
887         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
888                 sg_init_one(&sg, addr->sa_data, dev->addr_len);
889                 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
890                                           VIRTIO_NET_CTRL_MAC_ADDR_SET,
891                                           &sg, NULL)) {
892                         dev_warn(&vdev->dev,
893                                  "Failed to set MAC address via vq command.\n");
894                         return -EINVAL;
895                 }
896         } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
897                 vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
898                                   addr->sa_data, dev->addr_len);
899         }
900
901         eth_commit_mac_addr_change(dev, p);
902
903         return 0;
904 }
905
906 static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
907                                                struct rtnl_link_stats64 *tot)
908 {
909         struct virtnet_info *vi = netdev_priv(dev);
910         int cpu;
911         unsigned int start;
912
913         for_each_possible_cpu(cpu) {
914                 struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
915                 u64 tpackets, tbytes, rpackets, rbytes;
916
917                 do {
918                         start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
919                         tpackets = stats->tx_packets;
920                         tbytes   = stats->tx_bytes;
921                 } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));
922
923                 do {
924                         start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
925                         rpackets = stats->rx_packets;
926                         rbytes   = stats->rx_bytes;
927                 } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));
928
929                 tot->rx_packets += rpackets;
930                 tot->tx_packets += tpackets;
931                 tot->rx_bytes   += rbytes;
932                 tot->tx_bytes   += tbytes;
933         }
934
935         tot->tx_dropped = dev->stats.tx_dropped;
936         tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
937         tot->rx_dropped = dev->stats.rx_dropped;
938         tot->rx_length_errors = dev->stats.rx_length_errors;
939         tot->rx_frame_errors = dev->stats.rx_frame_errors;
940
941         return tot;
942 }
943
944 #ifdef CONFIG_NET_POLL_CONTROLLER
945 static void virtnet_netpoll(struct net_device *dev)
946 {
947         struct virtnet_info *vi = netdev_priv(dev);
948         int i;
949
950         for (i = 0; i < vi->curr_queue_pairs; i++)
951                 napi_schedule(&vi->rq[i].napi);
952 }
953 #endif
954
955 static void virtnet_ack_link_announce(struct virtnet_info *vi)
956 {
957         rtnl_lock();
958         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
959                                   VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
960                 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
961         rtnl_unlock();
962 }
963
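/* Ask the device, via the control virtqueue, to use the given number of
 * queue pairs.
 */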
964 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
965 {
966         struct scatterlist sg;
967         struct virtio_net_ctrl_mq s;
968         struct net_device *dev = vi->dev;
969
970         if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
971                 return 0;
972
973         s.virtqueue_pairs = queue_pairs;
974         sg_init_one(&sg, &s, sizeof(s));
975
976         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
977                                   VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
978                 dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
979                          queue_pairs);
980                 return -EINVAL;
981         } else {
982                 vi->curr_queue_pairs = queue_pairs;
983                 /* virtnet_open() will refill when the device is brought up. */
984                 if (dev->flags & IFF_UP)
985                         schedule_delayed_work(&vi->refill, 0);
986         }
987
988         return 0;
989 }
990
991 static int virtnet_close(struct net_device *dev)
992 {
993         struct virtnet_info *vi = netdev_priv(dev);
994         int i;
995
996         /* Make sure refill_work doesn't re-enable napi! */
997         cancel_delayed_work_sync(&vi->refill);
998
999         for (i = 0; i < vi->max_queue_pairs; i++)
1000                 napi_disable(&vi->rq[i].napi);
1001
1002         return 0;
1003 }
1004
1005 static void virtnet_set_rx_mode(struct net_device *dev)
1006 {
1007         struct virtnet_info *vi = netdev_priv(dev);
1008         struct scatterlist sg[2];
1009         u8 promisc, allmulti;
1010         struct virtio_net_ctrl_mac *mac_data;
1011         struct netdev_hw_addr *ha;
1012         int uc_count;
1013         int mc_count;
1014         void *buf;
1015         int i;
1016
1017         /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
1018         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
1019                 return;
1020
1021         promisc = ((dev->flags & IFF_PROMISC) != 0);
1022         allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
1023
1024         sg_init_one(sg, &promisc, sizeof(promisc));
1025
1026         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1027                                   VIRTIO_NET_CTRL_RX_PROMISC,
1028                                   sg, NULL))
1029                 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
1030                          promisc ? "en" : "dis");
1031
1032         sg_init_one(sg, &allmulti, sizeof(allmulti));
1033
1034         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1035                                   VIRTIO_NET_CTRL_RX_ALLMULTI,
1036                                   sg, NULL))
1037                 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
1038                          allmulti ? "en" : "dis");
1039
1040         uc_count = netdev_uc_count(dev);
1041         mc_count = netdev_mc_count(dev);
1042         /* MAC filter - use one buffer for both lists */
1043         buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
1044                       (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
1045         mac_data = buf;
1046         if (!buf)
1047                 return;
1048
1049         sg_init_table(sg, 2);
1050
1051         /* Store the unicast list and count in the front of the buffer */
1052         mac_data->entries = uc_count;
1053         i = 0;
1054         netdev_for_each_uc_addr(ha, dev)
1055                 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1056
1057         sg_set_buf(&sg[0], mac_data,
1058                    sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
1059
1060         /* multicast list and count fill the end */
1061         mac_data = (void *)&mac_data->macs[uc_count][0];
1062
1063         mac_data->entries = mc_count;
1064         i = 0;
1065         netdev_for_each_mc_addr(ha, dev)
1066                 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1067
1068         sg_set_buf(&sg[1], mac_data,
1069                    sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
1070
1071         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1072                                   VIRTIO_NET_CTRL_MAC_TABLE_SET,
1073                                   sg, NULL))
1074                 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
1075
1076         kfree(buf);
1077 }
1078
1079 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
1080                                    __be16 proto, u16 vid)
1081 {
1082         struct virtnet_info *vi = netdev_priv(dev);
1083         struct scatterlist sg;
1084
1085         sg_init_one(&sg, &vid, sizeof(vid));
1086
1087         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1088                                   VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
1089                 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
1090         return 0;
1091 }
1092
1093 static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
1094                                     __be16 proto, u16 vid)
1095 {
1096         struct virtnet_info *vi = netdev_priv(dev);
1097         struct scatterlist sg;
1098
1099         sg_init_one(&sg, &vid, sizeof(vid));
1100
1101         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1102                                   VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
1103                 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
1104         return 0;
1105 }
1106
1107 static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
1108 {
1109         int i;
1110         int cpu;
1111
1112         if (vi->affinity_hint_set) {
1113                 for (i = 0; i < vi->max_queue_pairs; i++) {
1114                         virtqueue_set_affinity(vi->rq[i].vq, -1);
1115                         virtqueue_set_affinity(vi->sq[i].vq, -1);
1116                 }
1117
1118                 vi->affinity_hint_set = false;
1119         }
1120
1121         i = 0;
1122         for_each_online_cpu(cpu) {
1123                 if (cpu == hcpu) {
1124                         *per_cpu_ptr(vi->vq_index, cpu) = -1;
1125                 } else {
1126                         *per_cpu_ptr(vi->vq_index, cpu) =
1127                                 ++i % vi->curr_queue_pairs;
1128                 }
1129         }
1130 }
1131
1132 static void virtnet_set_affinity(struct virtnet_info *vi)
1133 {
1134         int i;
1135         int cpu;
1136
1137         /* In multiqueue mode, when the number of cpus is equal to the number
1138          * of queue pairs, we let each queue pair be private to one cpu by
1139          * setting the affinity hint to eliminate contention.
1140          */
1141         if (vi->curr_queue_pairs == 1 ||
1142             vi->max_queue_pairs != num_online_cpus()) {
1143                 virtnet_clean_affinity(vi, -1);
1144                 return;
1145         }
1146
1147         i = 0;
1148         for_each_online_cpu(cpu) {
1149                 virtqueue_set_affinity(vi->rq[i].vq, cpu);
1150                 virtqueue_set_affinity(vi->sq[i].vq, cpu);
1151                 *per_cpu_ptr(vi->vq_index, cpu) = i;
1152                 i++;
1153         }
1154
1155         vi->affinity_hint_set = true;
1156 }
1157
1158 static int virtnet_cpu_callback(struct notifier_block *nfb,
1159                                 unsigned long action, void *hcpu)
1160 {
1161         struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
1162
1163         mutex_lock(&vi->config_lock);
1164
1165         if (!vi->config_enable)
1166                 goto done;
1167
1168         switch(action & ~CPU_TASKS_FROZEN) {
1169         case CPU_ONLINE:
1170         case CPU_DOWN_FAILED:
1171         case CPU_DEAD:
1172                 virtnet_set_affinity(vi);
1173                 break;
1174         case CPU_DOWN_PREPARE:
1175                 virtnet_clean_affinity(vi, (long)hcpu);
1176                 break;
1177         default:
1178                 break;
1179         }
1180
1181 done:
1182         mutex_unlock(&vi->config_lock);
1183         return NOTIFY_OK;
1184 }
1185
1186 static void virtnet_get_ringparam(struct net_device *dev,
1187                                 struct ethtool_ringparam *ring)
1188 {
1189         struct virtnet_info *vi = netdev_priv(dev);
1190
1191         ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
1192         ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
1193         ring->rx_pending = ring->rx_max_pending;
1194         ring->tx_pending = ring->tx_max_pending;
1195 }
1196
1197
1198 static void virtnet_get_drvinfo(struct net_device *dev,
1199                                 struct ethtool_drvinfo *info)
1200 {
1201         struct virtnet_info *vi = netdev_priv(dev);
1202         struct virtio_device *vdev = vi->vdev;
1203
1204         strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1205         strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
1206         strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
1207
1208 }
1209
1210 /* TODO: Eliminate OOO packets during switching */
1211 static int virtnet_set_channels(struct net_device *dev,
1212                                 struct ethtool_channels *channels)
1213 {
1214         struct virtnet_info *vi = netdev_priv(dev);
1215         u16 queue_pairs = channels->combined_count;
1216         int err;
1217
1218         /* We don't support separate rx/tx channels.
1219          * We don't allow setting 'other' channels.
1220          */
1221         if (channels->rx_count || channels->tx_count || channels->other_count)
1222                 return -EINVAL;
1223
1224         if (queue_pairs > vi->max_queue_pairs)
1225                 return -EINVAL;
1226
1227         get_online_cpus();
1228         err = virtnet_set_queues(vi, queue_pairs);
1229         if (!err) {
1230                 netif_set_real_num_tx_queues(dev, queue_pairs);
1231                 netif_set_real_num_rx_queues(dev, queue_pairs);
1232
1233                 virtnet_set_affinity(vi);
1234         }
1235         put_online_cpus();
1236
1237         return err;
1238 }
1239
1240 static void virtnet_get_channels(struct net_device *dev,
1241                                  struct ethtool_channels *channels)
1242 {
1243         struct virtnet_info *vi = netdev_priv(dev);
1244
1245         channels->combined_count = vi->curr_queue_pairs;
1246         channels->max_combined = vi->max_queue_pairs;
1247         channels->max_other = 0;
1248         channels->rx_count = 0;
1249         channels->tx_count = 0;
1250         channels->other_count = 0;
1251 }
1252
1253 static const struct ethtool_ops virtnet_ethtool_ops = {
1254         .get_drvinfo = virtnet_get_drvinfo,
1255         .get_link = ethtool_op_get_link,
1256         .get_ringparam = virtnet_get_ringparam,
1257         .set_channels = virtnet_set_channels,
1258         .get_channels = virtnet_get_channels,
1259 };
1260
1261 #define MIN_MTU 68
1262 #define MAX_MTU 65535
1263
1264 static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
1265 {
1266         if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
1267                 return -EINVAL;
1268         dev->mtu = new_mtu;
1269         return 0;
1270 }
1271
1272 /* To avoid contending for a lock held by a vcpu that might exit to the host,
1273  * select the txq based on the processor id.
1274  */
1275 static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
1276 {
1277         int txq;
1278         struct virtnet_info *vi = netdev_priv(dev);
1279
1280         if (skb_rx_queue_recorded(skb)) {
1281                 txq = skb_get_rx_queue(skb);
1282         } else {
1283                 txq = *__this_cpu_ptr(vi->vq_index);
1284                 if (txq == -1)
1285                         txq = 0;
1286         }
1287
1288         while (unlikely(txq >= dev->real_num_tx_queues))
1289                 txq -= dev->real_num_tx_queues;
1290
1291         return txq;
1292 }
1293
1294 static const struct net_device_ops virtnet_netdev = {
1295         .ndo_open            = virtnet_open,
1296         .ndo_stop            = virtnet_close,
1297         .ndo_start_xmit      = start_xmit,
1298         .ndo_validate_addr   = eth_validate_addr,
1299         .ndo_set_mac_address = virtnet_set_mac_address,
1300         .ndo_set_rx_mode     = virtnet_set_rx_mode,
1301         .ndo_change_mtu      = virtnet_change_mtu,
1302         .ndo_get_stats64     = virtnet_stats,
1303         .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
1304         .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
1305         .ndo_select_queue     = virtnet_select_queue,
1306 #ifdef CONFIG_NET_POLL_CONTROLLER
1307         .ndo_poll_controller = virtnet_netpoll,
1308 #endif
1309 };
1310
1311 static void virtnet_config_changed_work(struct work_struct *work)
1312 {
1313         struct virtnet_info *vi =
1314                 container_of(work, struct virtnet_info, config_work);
1315         u16 v;
1316
1317         mutex_lock(&vi->config_lock);
1318         if (!vi->config_enable)
1319                 goto done;
1320
1321         if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
1322                               offsetof(struct virtio_net_config, status),
1323                               &v) < 0)
1324                 goto done;
1325
1326         if (v & VIRTIO_NET_S_ANNOUNCE) {
1327                 netdev_notify_peers(vi->dev);
1328                 virtnet_ack_link_announce(vi);
1329         }
1330
1331         /* Ignore unknown (future) status bits */
1332         v &= VIRTIO_NET_S_LINK_UP;
1333
1334         if (vi->status == v)
1335                 goto done;
1336
1337         vi->status = v;
1338
1339         if (vi->status & VIRTIO_NET_S_LINK_UP) {
1340                 netif_carrier_on(vi->dev);
1341                 netif_tx_wake_all_queues(vi->dev);
1342         } else {
1343                 netif_carrier_off(vi->dev);
1344                 netif_tx_stop_all_queues(vi->dev);
1345         }
1346 done:
1347         mutex_unlock(&vi->config_lock);
1348 }
1349
1350 static void virtnet_config_changed(struct virtio_device *vdev)
1351 {
1352         struct virtnet_info *vi = vdev->priv;
1353
1354         schedule_work(&vi->config_work);
1355 }
1356
1357 static void virtnet_free_queues(struct virtnet_info *vi)
1358 {
1359         kfree(vi->rq);
1360         kfree(vi->sq);
1361 }
1362
1363 static void free_receive_bufs(struct virtnet_info *vi)
1364 {
1365         int i;
1366
1367         for (i = 0; i < vi->max_queue_pairs; i++) {
1368                 while (vi->rq[i].pages)
1369                         __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
1370         }
1371 }
1372
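/* Detach and free any buffers still sitting in the virtqueues at teardown. */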
1373 static void free_unused_bufs(struct virtnet_info *vi)
1374 {
1375         void *buf;
1376         int i;
1377
1378         for (i = 0; i < vi->max_queue_pairs; i++) {
1379                 struct virtqueue *vq = vi->sq[i].vq;
1380                 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
1381                         dev_kfree_skb(buf);
1382         }
1383
1384         for (i = 0; i < vi->max_queue_pairs; i++) {
1385                 struct virtqueue *vq = vi->rq[i].vq;
1386
1387                 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
1388                         if (vi->big_packets)
1389                                 give_pages(&vi->rq[i], buf);
1390                         else if (vi->mergeable_rx_bufs)
1391                                 put_page(virt_to_head_page(buf));
1392                         else
1393                                 dev_kfree_skb(buf);
1394                         --vi->rq[i].num;
1395                 }
1396                 BUG_ON(vi->rq[i].num != 0);
1397         }
1398 }
1399
1400 static void virtnet_del_vqs(struct virtnet_info *vi)
1401 {
1402         struct virtio_device *vdev = vi->vdev;
1403
1404         virtnet_clean_affinity(vi, -1);
1405
1406         vdev->config->del_vqs(vdev);
1407
1408         virtnet_free_queues(vi);
1409 }
1410
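/* Ask the transport for all rx/tx virtqueues plus the optional control vq. */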
1411 static int virtnet_find_vqs(struct virtnet_info *vi)
1412 {
1413         vq_callback_t **callbacks;
1414         struct virtqueue **vqs;
1415         int ret = -ENOMEM;
1416         int i, total_vqs;
1417         const char **names;
1418
1419         /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
1420          * possibly N-1 more RX/TX queue pairs used in multiqueue mode, followed
1421          * by a possible control vq.
1422          */
1423         total_vqs = vi->max_queue_pairs * 2 +
1424                     virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
1425
1426         /* Allocate space for find_vqs parameters */
1427         vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
1428         if (!vqs)
1429                 goto err_vq;
1430         callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
1431         if (!callbacks)
1432                 goto err_callback;
1433         names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
1434         if (!names)
1435                 goto err_names;
1436
1437         /* Parameters for control virtqueue, if any */
1438         if (vi->has_cvq) {
1439                 callbacks[total_vqs - 1] = NULL;
1440                 names[total_vqs - 1] = "control";
1441         }
1442
1443         /* Allocate/initialize parameters for send/receive virtqueues */
1444         for (i = 0; i < vi->max_queue_pairs; i++) {
1445                 callbacks[rxq2vq(i)] = skb_recv_done;
1446                 callbacks[txq2vq(i)] = skb_xmit_done;
1447                 sprintf(vi->rq[i].name, "input.%d", i);
1448                 sprintf(vi->sq[i].name, "output.%d", i);
1449                 names[rxq2vq(i)] = vi->rq[i].name;
1450                 names[txq2vq(i)] = vi->sq[i].name;
1451         }
1452
1453         ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
1454                                          names);
1455         if (ret)
1456                 goto err_find;
1457
1458         if (vi->has_cvq) {
1459                 vi->cvq = vqs[total_vqs - 1];
1460                 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
1461                         vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1462         }
1463
1464         for (i = 0; i < vi->max_queue_pairs; i++) {
1465                 vi->rq[i].vq = vqs[rxq2vq(i)];
1466                 vi->sq[i].vq = vqs[txq2vq(i)];
1467         }
1468
1469         kfree(names);
1470         kfree(callbacks);
1471         kfree(vqs);
1472
1473         return 0;
1474
1475 err_find:
1476         kfree(names);
1477 err_names:
1478         kfree(callbacks);
1479 err_callback:
1480         kfree(vqs);
1481 err_vq:
1482         return ret;
1483 }
1484
1485 static int virtnet_alloc_queues(struct virtnet_info *vi)
1486 {
1487         int i;
1488
1489         vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
1490         if (!vi->sq)
1491                 goto err_sq;
1492         vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
1493         if (!vi->rq)
1494                 goto err_rq;
1495
1496         INIT_DELAYED_WORK(&vi->refill, refill_work);
1497         for (i = 0; i < vi->max_queue_pairs; i++) {
1498                 vi->rq[i].pages = NULL;
1499                 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
1500                                napi_weight);
1501
1502                 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
1503                 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
1504         }
1505
1506         return 0;
1507
1508 err_rq:
1509         kfree(vi->sq);
1510 err_sq:
1511         return -ENOMEM;
1512 }
1513
1514 static int init_vqs(struct virtnet_info *vi)
1515 {
1516         int ret;
1517
1518         /* Allocate send & receive queues */
1519         ret = virtnet_alloc_queues(vi);
1520         if (ret)
1521                 goto err;
1522
1523         ret = virtnet_find_vqs(vi);
1524         if (ret)
1525                 goto err_free;
1526
1527         get_online_cpus();
1528         virtnet_set_affinity(vi);
1529         put_online_cpus();
1530
1531         return 0;
1532
1533 err_free:
1534         virtnet_free_queues(vi);
1535 err:
1536         return ret;
1537 }
1538
1539 static int virtnet_probe(struct virtio_device *vdev)
1540 {
1541         int i, err;
1542         struct net_device *dev;
1543         struct virtnet_info *vi;
1544         u16 max_queue_pairs;
1545
1546         /* Find if host supports multiqueue virtio_net device */
1547         err = virtio_config_val(vdev, VIRTIO_NET_F_MQ,
1548                                 offsetof(struct virtio_net_config,
1549                                 max_virtqueue_pairs), &max_queue_pairs);
1550
1551         /* We need at least 2 queue pairs (and a control vq) for multiqueue */
1552         if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
1553             max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
1554             !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1555                 max_queue_pairs = 1;
1556
1557         /* Allocate ourselves a network device with room for our info */
1558         dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
1559         if (!dev)
1560                 return -ENOMEM;
1561
1562         /* Set up network device as normal. */
1563         dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
1564         dev->netdev_ops = &virtnet_netdev;
1565         dev->features = NETIF_F_HIGHDMA;
1566
1567         SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
1568         SET_NETDEV_DEV(dev, &vdev->dev);
1569
1570         /* Do we support "hardware" checksums? */
1571         if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
1572                 /* This opens up the world of extra features. */
1573                 dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1574                 if (csum)
1575                         dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1576
1577                 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
1578                         dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
1579                                 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
1580                 }
1581                 /* Individual feature bits: what can host handle? */
1582                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
1583                         dev->hw_features |= NETIF_F_TSO;
1584                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
1585                         dev->hw_features |= NETIF_F_TSO6;
1586                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
1587                         dev->hw_features |= NETIF_F_TSO_ECN;
1588                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
1589                         dev->hw_features |= NETIF_F_UFO;
1590
1591                 if (gso)
1592                         dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
1593                 /* (!csum && gso) case will be fixed by register_netdev() */
1594         }
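        /*
         * The offload setup above follows the usual split: hw_features lists
         * what userspace may toggle later (e.g. with ethtool -K), while
         * dev->features holds the defaults; the csum/gso module parameters
         * only influence the latter.
         */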
1595         if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
1596                 dev->features |= NETIF_F_RXCSUM;
1597
1598         dev->vlan_features = dev->features;
1599
1600         /* Configuration may specify what MAC to use.  Otherwise random. */
1601         if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
1602                                   offsetof(struct virtio_net_config, mac),
1603                                   dev->dev_addr, dev->addr_len) < 0)
1604                 eth_hw_addr_random(dev);
1605
1606         /* Set up our device-specific information */
1607         vi = netdev_priv(dev);
1608         vi->dev = dev;
1609         vi->vdev = vdev;
1610         vdev->priv = vi;
1611         vi->stats = alloc_percpu(struct virtnet_stats);
1612         err = -ENOMEM;
1613         if (vi->stats == NULL)
1614                 goto free;
1615
1616         vi->vq_index = alloc_percpu(int);
1617         if (vi->vq_index == NULL)
1618                 goto free_stats;
1619
1620         mutex_init(&vi->config_lock);
1621         vi->config_enable = true;
1622         INIT_WORK(&vi->config_work, virtnet_config_changed_work);
1623
1624         /* If we can receive ANY GSO packets, we must allocate large ones. */
1625         if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1626             virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
1627             virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
1628                 vi->big_packets = true;
1629
1630         if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
1631                 vi->mergeable_rx_bufs = true;
1632
1633         if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
1634                 vi->any_header_sg = true;
1635
1636         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1637                 vi->has_cvq = true;
1638
1639         /* Default to a single tx/rx queue pair; raised later via virtnet_set_queues() */
1640         vi->curr_queue_pairs = 1;
1641         vi->max_queue_pairs = max_queue_pairs;
1642
1643         /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
1644         err = init_vqs(vi);
1645         if (err)
1646                 goto free_index;
1647
1648         netif_set_real_num_tx_queues(dev, 1);
1649         netif_set_real_num_rx_queues(dev, 1);
1650
1651         err = register_netdev(dev);
1652         if (err) {
1653                 pr_debug("virtio_net: registering device failed\n");
1654                 goto free_vqs;
1655         }
1656
1657         /* Last of all, set up some receive buffers. */
1658         for (i = 0; i < vi->curr_queue_pairs; i++) {
1659                 try_fill_recv(&vi->rq[i], GFP_KERNEL);
1660
1661                 /* If we didn't even get one input buffer, we're useless. */
1662                 if (vi->rq[i].num == 0) {
1663                         free_unused_bufs(vi);
1664                         err = -ENOMEM;
1665                         goto free_recv_bufs;
1666                 }
1667         }
1668
1669         vi->nb.notifier_call = &virtnet_cpu_callback;
1670         err = register_hotcpu_notifier(&vi->nb);
1671         if (err) {
1672                 pr_debug("virtio_net: registering cpu notifier failed\n");
1673                 goto free_recv_bufs;
1674         }
1675
1676         /* Assume link up if device can't report link status,
1677            otherwise get link status from config. */
1678         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
1679                 netif_carrier_off(dev);
1680                 schedule_work(&vi->config_work);
1681         } else {
1682                 vi->status = VIRTIO_NET_S_LINK_UP;
1683                 netif_carrier_on(dev);
1684         }
1685
1686         pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
1687                  dev->name, max_queue_pairs);
1688
1689         return 0;
1690
1691 free_recv_bufs:
1692         free_receive_bufs(vi);
1693         unregister_netdev(dev);
1694 free_vqs:
1695         cancel_delayed_work_sync(&vi->refill);
1696         virtnet_del_vqs(vi);
1697         if (vi->alloc_frag.page)
1698                 put_page(vi->alloc_frag.page);
1699 free_index:
1700         free_percpu(vi->vq_index);
1701 free_stats:
1702         free_percpu(vi->stats);
1703 free:
1704         free_netdev(dev);
1705         return err;
1706 }
1707
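/*
 * Teardown shared by remove and freeze: reset the device first so it stops
 * touching our buffers, then reclaim anything still queued and delete the
 * virtqueues.
 */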
1708 static void remove_vq_common(struct virtnet_info *vi)
1709 {
1710         vi->vdev->config->reset(vi->vdev);
1711
1712         /* Free unused buffers in both send and recv, if any. */
1713         free_unused_bufs(vi);
1714
1715         free_receive_bufs(vi);
1716
1717         virtnet_del_vqs(vi);
1718 }
1719
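/*
 * Ordering matters here: the config-change worker is fenced off before the
 * netdev is unregistered, and the virtqueues are torn down only once the
 * interface can no longer be opened.
 */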
1720 static void virtnet_remove(struct virtio_device *vdev)
1721 {
1722         struct virtnet_info *vi = vdev->priv;
1723
1724         unregister_hotcpu_notifier(&vi->nb);
1725
1726         /* Prevent config work handler from accessing the device. */
1727         mutex_lock(&vi->config_lock);
1728         vi->config_enable = false;
1729         mutex_unlock(&vi->config_lock);
1730
1731         unregister_netdev(vi->dev);
1732
1733         remove_vq_common(vi);
1734         if (vi->alloc_frag.page)
1735                 put_page(vi->alloc_frag.page);
1736
1737         flush_work(&vi->config_work);
1738
1739         free_percpu(vi->vq_index);
1740         free_percpu(vi->stats);
1741         free_netdev(vi->dev);
1742 }
1743
1744 #ifdef CONFIG_PM
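/*
 * Freeze/restore rebuild the virtqueues from scratch: their contents are not
 * preserved across hibernation, so freeze tears everything down via
 * remove_vq_common() and restore recreates it with init_vqs().
 */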
1745 static int virtnet_freeze(struct virtio_device *vdev)
1746 {
1747         struct virtnet_info *vi = vdev->priv;
1748         int i;
1749
1750         /* Prevent config work handler from accessing the device */
1751         mutex_lock(&vi->config_lock);
1752         vi->config_enable = false;
1753         mutex_unlock(&vi->config_lock);
1754
1755         netif_device_detach(vi->dev);
1756         cancel_delayed_work_sync(&vi->refill);
1757
1758         if (netif_running(vi->dev))
1759                 for (i = 0; i < vi->max_queue_pairs; i++) {
1760                         napi_disable(&vi->rq[i].napi);
1761                         netif_napi_del(&vi->rq[i].napi);
1762                 }
1763
1764         remove_vq_common(vi);
1765
1766         flush_work(&vi->config_work);
1767
1768         return 0;
1769 }
1770
1771 static int virtnet_restore(struct virtio_device *vdev)
1772 {
1773         struct virtnet_info *vi = vdev->priv;
1774         int err, i;
1775
1776         err = init_vqs(vi);
1777         if (err)
1778                 return err;
1779
1780         if (netif_running(vi->dev))
1781                 for (i = 0; i < vi->max_queue_pairs; i++)
1782                         virtnet_napi_enable(&vi->rq[i]);
1783
1784         netif_device_attach(vi->dev);
1785
1786         for (i = 0; i < vi->curr_queue_pairs; i++)
1787                 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1788                         schedule_delayed_work(&vi->refill, 0);
1789
1790         mutex_lock(&vi->config_lock);
1791         vi->config_enable = true;
1792         mutex_unlock(&vi->config_lock);
1793
1794         rtnl_lock();
1795         virtnet_set_queues(vi, vi->curr_queue_pairs);
1796         rtnl_unlock();
1797
1798         return 0;
1799 }
1800 #endif
1801
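/* Match any virtio device exposing the network device ID. */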
1802 static struct virtio_device_id id_table[] = {
1803         { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
1804         { 0 },
1805 };
1806
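/*
 * Feature bits this driver is willing to negotiate; the virtio core offers
 * them to the device and the virtio_has_feature() checks above test the
 * negotiated result.
 */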
1807 static unsigned int features[] = {
1808         VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
1809         VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
1810         VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
1811         VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
1812         VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
1813         VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
1814         VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
1815         VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
1816         VIRTIO_NET_F_CTRL_MAC_ADDR,
1817         VIRTIO_F_ANY_LAYOUT,
1818 };
1819
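/*
 * Driver glue: the virtio core binds devices via id_table, negotiates the
 * bits in feature_table, and then invokes the probe/remove/config_changed
 * (and, under CONFIG_PM, freeze/restore) callbacks defined above.
 */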
1820 static struct virtio_driver virtio_net_driver = {
1821         .feature_table = features,
1822         .feature_table_size = ARRAY_SIZE(features),
1823         .driver.name =  KBUILD_MODNAME,
1824         .driver.owner = THIS_MODULE,
1825         .id_table =     id_table,
1826         .probe =        virtnet_probe,
1827         .remove =       virtnet_remove,
1828         .config_changed = virtnet_config_changed,
1829 #ifdef CONFIG_PM
1830         .freeze =       virtnet_freeze,
1831         .restore =      virtnet_restore,
1832 #endif
1833 };
1834
1835 module_virtio_driver(virtio_net_driver);
1836
1837 MODULE_DEVICE_TABLE(virtio, id_table);
1838 MODULE_DESCRIPTION("Virtio network driver");
1839 MODULE_LICENSE("GPL");