xen-netback: add support for multicast control
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7d50711..42569b9 100644
@@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
                                                        struct sk_buff *skb,
                                                        struct xen_netif_tx_request *txp,
-                                                       struct gnttab_map_grant_ref *gop)
+                                                       struct gnttab_map_grant_ref *gop,
+                                                       unsigned int frag_overflow,
+                                                       struct sk_buff *nskb)
 {
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        skb_frag_t *frags = shinfo->frags;
        u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
        int start;
        pending_ring_idx_t index;
-       unsigned int nr_slots, frag_overflow = 0;
+       unsigned int nr_slots;
 
-       /* At this point shinfo->nr_frags is in fact the number of
-        * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
-        */
-       if (shinfo->nr_frags > MAX_SKB_FRAGS) {
-               frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
-               BUG_ON(frag_overflow > MAX_SKB_FRAGS);
-               shinfo->nr_frags = MAX_SKB_FRAGS;
-       }
        nr_slots = shinfo->nr_frags;
 
        /* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
        }
 
        if (frag_overflow) {
-               struct sk_buff *nskb = xenvif_alloc_skb(0);
-               if (unlikely(nskb == NULL)) {
-                       if (net_ratelimit())
-                               netdev_err(queue->vif->dev,
-                                          "Can't allocate the frag_list skb.\n");
-                       return NULL;
-               }
 
                shinfo = skb_shinfo(nskb);
                frags = shinfo->frags;
@@ -1170,14 +1157,89 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
        return false;
 }
 
+/* No locking is required in xenvif_mcast_add/del() as they are
+ * only ever invoked from NAPI poll. An RCU list is used because
+ * xenvif_mcast_match() is called asynchronously, during start_xmit.
+ */
+
+static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
+{
+       struct xenvif_mcast_addr *mcast;
+
+       if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
+               if (net_ratelimit())
+                       netdev_err(vif->dev,
+                                  "Too many multicast addresses\n");
+               return -ENOSPC;
+       }
+
+       mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
+       if (!mcast)
+               return -ENOMEM;
+
+       ether_addr_copy(mcast->addr, addr);
+       list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
+       vif->fe_mcast_count++;
+
+       return 0;
+}
+
+static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
+{
+       struct xenvif_mcast_addr *mcast;
+
+       list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+               if (ether_addr_equal(addr, mcast->addr)) {
+                       --vif->fe_mcast_count;
+                       list_del_rcu(&mcast->entry);
+                       kfree_rcu(mcast, rcu);
+                       break;
+               }
+       }
+}
+
+bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
+{
+       struct xenvif_mcast_addr *mcast;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+               if (ether_addr_equal(addr, mcast->addr)) {
+                       rcu_read_unlock();
+                       return true;
+               }
+       }
+       rcu_read_unlock();
+
+       return false;
+}
+
+void xenvif_mcast_addr_list_free(struct xenvif *vif)
+{
+       /* No need for locking or RCU here. NAPI poll and TX queue
+        * are stopped.
+        */
+       while (!list_empty(&vif->fe_mcast_addr)) {
+               struct xenvif_mcast_addr *mcast;
+
+               mcast = list_first_entry(&vif->fe_mcast_addr,
+                                        struct xenvif_mcast_addr,
+                                        entry);
+               --vif->fe_mcast_count;
+               list_del(&mcast->entry);
+               kfree(mcast);
+       }
+}
+
 static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                                     int budget,
                                     unsigned *copy_ops,
                                     unsigned *map_ops)
 {
-       struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
-       struct sk_buff *skb;
+       struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+       struct sk_buff *skb, *nskb;
        int ret;
+       unsigned int frag_overflow;
 
        while (skb_queue_len(&queue->tx_queue) < budget) {
                struct xen_netif_tx_request txreq;
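
The comment above xenvif_mcast_add() captures the concurrency design: the address list has a single writer (NAPI poll), so additions and deletions need no lock, while xenvif_mcast_match() runs concurrently from the transmit path under rcu_read_lock(), and removal pairs list_del_rcu() with kfree_rcu() as in xenvif_mcast_del(). The sketch below restates that single-writer RCU list pattern in isolation; the struct and function names are illustrative and not taken from the patch.

/* Minimal sketch of a single-writer RCU list, assuming the writer runs
 * in one context only (here: NAPI poll) so no lock is needed on the
 * write side; readers rely solely on rcu_read_lock().
 */
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>

struct mcast_sketch_entry {
	struct list_head list;
	struct rcu_head rcu;
	u8 addr[ETH_ALEN];
};

static LIST_HEAD(mcast_sketch_list);	/* mutated from one context only */

static int mcast_sketch_add(const u8 *addr)
{
	struct mcast_sketch_entry *e = kzalloc(sizeof(*e), GFP_ATOMIC);

	if (!e)
		return -ENOMEM;
	ether_addr_copy(e->addr, addr);
	list_add_tail_rcu(&e->list, &mcast_sketch_list);	/* publish to readers */
	return 0;
}

static bool mcast_sketch_match(const u8 *addr)
{
	struct mcast_sketch_entry *e;
	bool found = false;

	rcu_read_lock();		/* readers may run concurrently with the writer */
	list_for_each_entry_rcu(e, &mcast_sketch_list, list) {
		if (ether_addr_equal(e->addr, addr)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
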
@@ -1227,6 +1289,31 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                                break;
                }
 
+               if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
+                       struct xen_netif_extra_info *extra;
+
+                       extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
+                       ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
+
+                       make_tx_response(queue, &txreq,
+                                        (ret == 0) ?
+                                        XEN_NETIF_RSP_OKAY :
+                                        XEN_NETIF_RSP_ERROR);
+                       push_tx_responses(queue);
+                       continue;
+               }
+
+               if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
+                       struct xen_netif_extra_info *extra;
+
+                       extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
+                       xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
+
+                       make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
+                       push_tx_responses(queue);
+                       continue;
+               }
+
                ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
                if (unlikely(ret < 0))
                        break;
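
The two blocks above consume XEN_NETIF_EXTRA_TYPE_MCAST_ADD/DEL extra-info segments: the request carries only the address to add or remove, so the backend answers with make_tx_response() and continues without building a packet. How the resulting filter is consulted is not part of this file's diff; a plausible check on the backend transmit path, using only the helpers and fields introduced above, might look like the following hypothetical helper (illustration only).

/* Hypothetical helper: drop multicast frames the frontend has not
 * subscribed to, once it has registered at least one address.
 */
static bool mcast_sketch_should_drop(struct xenvif *vif,
				     const struct sk_buff *skb)
{
	const struct ethhdr *eth = (const struct ethhdr *)skb->data;

	if (!vif->fe_mcast_count)	/* no filter installed: pass everything */
		return false;

	return is_multicast_ether_addr(eth->h_dest) &&
	       !xenvif_mcast_match(vif, eth->h_dest);
}
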
@@ -1265,6 +1352,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        break;
                }
 
+               skb_shinfo(skb)->nr_frags = ret;
+               if (data_len < txreq.size)
+                       skb_shinfo(skb)->nr_frags++;
+               /* At this point shinfo->nr_frags is in fact the number of
+                * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+                */
+               frag_overflow = 0;
+               nskb = NULL;
+               if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
+                       frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
+                       BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+                       skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+                       nskb = xenvif_alloc_skb(0);
+                       if (unlikely(nskb == NULL)) {
+                               kfree_skb(skb);
+                               xenvif_tx_err(queue, &txreq, idx);
+                               if (net_ratelimit())
+                                       netdev_err(queue->vif->dev,
+                                                  "Can't allocate the frag_list skb.\n");
+                               break;
+                       }
+               }
+
                if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
                        struct xen_netif_extra_info *gso;
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
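
The hunk above moves the slot-overflow handling to the point where the skb is built: nr_frags still counts ring slots here, which can reach XEN_NETBK_LEGACY_SLOTS_MAX and therefore exceed MAX_SKB_FRAGS, so the excess slots are diverted into nskb for xenvif_get_requests() to map and later chain onto the frag_list. As a rough illustration (assuming 4K pages, where MAX_SKB_FRAGS is 17 and the legacy slot limit is 18), the split reduces to:

/* Illustrative only: a request spanning 18 slots keeps 17 frags on the
 * main skb and leaves an overflow of 1 slot for nskb.
 */
static unsigned int sketch_frag_overflow(unsigned int nr_slots)
{
	return nr_slots > MAX_SKB_FRAGS ? nr_slots - MAX_SKB_FRAGS : 0;
}
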
@@ -1272,6 +1382,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
                                /* Failure in xenvif_set_skb_gso is fatal. */
                                kfree_skb(skb);
+                               kfree_skb(nskb);
                                break;
                        }
                }
@@ -1294,9 +1405,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
                (*copy_ops)++;
 
-               skb_shinfo(skb)->nr_frags = ret;
                if (data_len < txreq.size) {
-                       skb_shinfo(skb)->nr_frags++;
                        frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
                                             pending_idx);
                        xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1419,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
                queue->pending_cons++;
 
-               request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
-               if (request_gop == NULL) {
-                       kfree_skb(skb);
-                       xenvif_tx_err(queue, &txreq, idx);
-                       break;
-               }
-               gop = request_gop;
+               gop = xenvif_get_requests(queue, skb, txfrags, gop,
+                                         frag_overflow, nskb);
 
                __skb_queue_tail(&queue->tx_queue, skb);
 
@@ -1536,7 +1640,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
                smp_wmb();
                queue->dealloc_prod++;
        } while (ubuf);
-       wake_up(&queue->dealloc_wq);
        spin_unlock_irqrestore(&queue->callback_lock, flags);
 
        if (likely(zerocopy_success))