Merge tag 'for-linus-4.3-rc0b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 10 Sep 2015 23:21:11 +0000 (16:21 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 10 Sep 2015 23:21:11 +0000 (16:21 -0700)
Pull xen terminology fixes from David Vrabel:
 "Use the correct GFN/BFN terms more consistently"

* tag 'for-linus-4.3-rc0b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/xenbus: Rename the variable xen_store_mfn to xen_store_gfn
  xen/privcmd: Further s/MFN/GFN/ clean-up
  hvc/xen: Further s/MFN/GFN clean-up
  video/xen-fbfront: Further s/MFN/GFN clean-up
  xen/tmem: Use xen_page_to_gfn rather than pfn_to_gfn
  xen: Use correctly the Xen memory terminologies
  arm/xen: implement correctly pfn_to_mfn
  xen: Make clear that swiotlb and biomerge are dealing with DMA address
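
For context: in Xen terminology a PFN is the guest's own pseudo-physical frame number, an MFN is the underlying machine frame, and a GFN is the frame number a guest hands to hypervisor interfaces such as grant tables (equal to the MFN on x86 PV guests and to the PFN on auto-translated guests). Below is a minimal illustrative sketch, not part of this merge, of the GFN-oriented helpers the series converts callers to (xen_page_to_gfn() and gnttab_grant_foreign_access_ref(), both visible in the diff); the function name example_grant_page() is hypothetical.

	/*
	 * Illustrative sketch only.  A frontend granting a page to its
	 * backend passes the page's GFN, never a raw MFN.
	 */
	#include <xen/page.h>          /* xen_page_to_gfn() */
	#include <xen/grant_table.h>   /* gnttab_grant_foreign_access_ref() */

	static void example_grant_page(grant_ref_t ref, domid_t otherend_id,
				       struct page *page)
	{
		/* GFN as the hypervisor expects it: the PFN on auto-translated
		 * guests, the MFN on x86 PV guests.
		 */
		unsigned long gfn = xen_page_to_gfn(page);

		/* readonly = 0: grant the backend read/write access */
		gnttab_grant_foreign_access_ref(ref, otherend_id, gfn, 0);
	}
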

drivers/block/xen-blkfront.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/xen/events/events_base.c

@@@ -83,6 -83,7 +83,6 @@@ struct blk_shadow 
  struct split_bio {
        struct bio *bio;
        atomic_t pending;
 -      int err;
  };
  
  static DEFINE_MUTEX(blkfront_mutex);
@@@ -249,7 -250,7 +249,7 @@@ static struct grant *get_grant(grant_re
                                 struct blkfront_info *info)
  {
        struct grant *gnt_list_entry;
-       unsigned long buffer_mfn;
+       unsigned long buffer_gfn;
  
        BUG_ON(list_empty(&info->grants));
        gnt_list_entry = list_first_entry(&info->grants, struct grant,
                BUG_ON(!pfn);
                gnt_list_entry->pfn = pfn;
        }
-       buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
+       buffer_gfn = pfn_to_gfn(gnt_list_entry->pfn);
        gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
                                        info->xbdev->otherend_id,
-                                       buffer_mfn, 0);
+                                       buffer_gfn, 0);
        return gnt_list_entry;
  }
  
@@@ -1475,14 -1476,16 +1475,14 @@@ static int blkfront_probe(struct xenbus
        return 0;
  }
  
 -static void split_bio_end(struct bio *bio, int error)
 +static void split_bio_end(struct bio *bio)
  {
        struct split_bio *split_bio = bio->bi_private;
  
 -      if (error)
 -              split_bio->err = error;
 -
        if (atomic_dec_and_test(&split_bio->pending)) {
                split_bio->bio->bi_phys_segments = 0;
 -              bio_endio(split_bio->bio, split_bio->err);
 +              split_bio->bio->bi_error = bio->bi_error;
 +              bio_endio(split_bio->bio);
                kfree(split_bio);
        }
        bio_put(bio);
@@@ -149,20 -149,9 +149,20 @@@ static inline pending_ring_idx_t pendin
        return i & (MAX_PENDING_REQS-1);
  }
  
 -bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
 +static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
 +{
 +      if (vif->gso_mask)
 +              return DIV_ROUND_UP(vif->dev->gso_max_size, PAGE_SIZE) + 1;
 +      else
 +              return DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
 +}
 +
 +static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
  {
        RING_IDX prod, cons;
 +      int needed;
 +
 +      needed = xenvif_rx_ring_slots_needed(queue->vif);
  
        do {
                prod = queue->rx.sring->req_prod;
@@@ -325,7 -314,7 +325,7 @@@ static void xenvif_gop_frag_copy(struc
                } else {
                        copy_gop->source.domid = DOMID_SELF;
                        copy_gop->source.u.gmfn =
-                               virt_to_mfn(page_address(page));
+                               virt_to_gfn(page_address(page));
                }
                copy_gop->source.offset = offset;
  
@@@ -524,7 -513,7 +524,7 @@@ static void xenvif_rx_action(struct xen
  
        skb_queue_head_init(&rxq);
  
 -      while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
 +      while (xenvif_rx_ring_slots_available(queue)
               && (skb = xenvif_rx_dequeue(queue)) != NULL) {
                queue->last_rx_time = jiffies;
  
@@@ -1168,80 -1157,6 +1168,80 @@@ static bool tx_credit_exceeded(struct x
        return false;
  }
  
 +/* No locking is required in xenvif_mcast_add/del() as they are
 + * only ever invoked from NAPI poll. An RCU list is used because
 + * xenvif_mcast_match() is called asynchronously, during start_xmit.
 + */
 +
 +static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
 +{
 +      struct xenvif_mcast_addr *mcast;
 +
 +      if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
 +              if (net_ratelimit())
 +                      netdev_err(vif->dev,
 +                                 "Too many multicast addresses\n");
 +              return -ENOSPC;
 +      }
 +
 +      mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
 +      if (!mcast)
 +              return -ENOMEM;
 +
 +      ether_addr_copy(mcast->addr, addr);
 +      list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
 +      vif->fe_mcast_count++;
 +
 +      return 0;
 +}
 +
 +static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
 +{
 +      struct xenvif_mcast_addr *mcast;
 +
 +      list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
 +              if (ether_addr_equal(addr, mcast->addr)) {
 +                      --vif->fe_mcast_count;
 +                      list_del_rcu(&mcast->entry);
 +                      kfree_rcu(mcast, rcu);
 +                      break;
 +              }
 +      }
 +}
 +
 +bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
 +{
 +      struct xenvif_mcast_addr *mcast;
 +
 +      rcu_read_lock();
 +      list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
 +              if (ether_addr_equal(addr, mcast->addr)) {
 +                      rcu_read_unlock();
 +                      return true;
 +              }
 +      }
 +      rcu_read_unlock();
 +
 +      return false;
 +}
 +
 +void xenvif_mcast_addr_list_free(struct xenvif *vif)
 +{
 +      /* No need for locking or RCU here. NAPI poll and TX queue
 +       * are stopped.
 +       */
 +      while (!list_empty(&vif->fe_mcast_addr)) {
 +              struct xenvif_mcast_addr *mcast;
 +
 +              mcast = list_first_entry(&vif->fe_mcast_addr,
 +                                       struct xenvif_mcast_addr,
 +                                       entry);
 +              --vif->fe_mcast_count;
 +              list_del(&mcast->entry);
 +              kfree(mcast);
 +      }
 +}
 +
  static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                                     int budget,
                                     unsigned *copy_ops,
                                break;
                }
  
 +              if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
 +                      struct xen_netif_extra_info *extra;
 +
 +                      extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
 +                      ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
 +
 +                      make_tx_response(queue, &txreq,
 +                                       (ret == 0) ?
 +                                       XEN_NETIF_RSP_OKAY :
 +                                       XEN_NETIF_RSP_ERROR);
 +                      push_tx_responses(queue);
 +                      continue;
 +              }
 +
 +              if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
 +                      struct xen_netif_extra_info *extra;
 +
 +                      extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
 +                      xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
 +
 +                      make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
 +                      push_tx_responses(queue);
 +                      continue;
 +              }
 +
                ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
                if (unlikely(ret < 0))
                        break;
                queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
  
                queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
-                       virt_to_mfn(skb->data);
+                       virt_to_gfn(skb->data);
                queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
                queue->tx_copy_ops[*copy_ops].dest.offset =
                        offset_in_page(skb->data);
@@@ -1949,7 -1839,8 +1949,7 @@@ static bool xenvif_rx_queue_stalled(str
        prod = queue->rx.sring->req_prod;
        cons = queue->rx.req_cons;
  
 -      return !queue->stalled
 -              && prod - cons < XEN_NETBK_RX_SLOTS_MAX
 +      return !queue->stalled && prod - cons < 1
                && time_after(jiffies,
                              queue->last_rx_time + queue->vif->stall_timeout);
  }
@@@ -1961,13 -1852,14 +1961,13 @@@ static bool xenvif_rx_queue_ready(struc
        prod = queue->rx.sring->req_prod;
        cons = queue->rx.req_cons;
  
 -      return queue->stalled
 -              && prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
 +      return queue->stalled && prod - cons >= 1;
  }
  
  static bool xenvif_have_rx_work(struct xenvif_queue *queue)
  {
        return (!skb_queue_empty(&queue->rx_queue)
 -              && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
 +              && xenvif_rx_ring_slots_available(queue))
                || (queue->vif->stall_timeout &&
                    (xenvif_rx_queue_stalled(queue)
                     || xenvif_rx_queue_ready(queue)))
@@@ -2114,11 -2006,8 +2114,11 @@@ static int __init netback_init(void
        if (!xen_domain())
                return -ENODEV;
  
 -      /* Allow as many queues as there are CPUs, by default */
 -      xenvif_max_queues = num_online_cpus();
 +      /* Allow as many queues as there are CPUs if user has not
 +       * specified a value.
 +       */
 +      if (xenvif_max_queues == 0)
 +              xenvif_max_queues = num_online_cpus();
  
        if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
                pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
@@@ -291,7 -291,7 +291,7 @@@ static void xennet_alloc_rx_buffers(str
                struct sk_buff *skb;
                unsigned short id;
                grant_ref_t ref;
-               unsigned long pfn;
+               unsigned long gfn;
                struct xen_netif_rx_request *req;
  
                skb = xennet_alloc_one_rx_buffer(queue);
                BUG_ON((signed short)ref < 0);
                queue->grant_rx_ref[id] = ref;
  
-               pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
+               gfn = xen_page_to_gfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
  
                req = RING_GET_REQUEST(&queue->rx, req_prod);
                gnttab_grant_foreign_access_ref(ref,
                                                queue->info->xbdev->otherend_id,
-                                               pfn_to_mfn(pfn),
+                                               gfn,
                                                0);
  
                req->id = id;
@@@ -430,8 -430,10 +430,10 @@@ static struct xen_netif_tx_request *xen
        ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
        BUG_ON((signed short)ref < 0);
  
-       gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
-                                       page_to_mfn(page), GNTMAP_readonly);
+       gnttab_grant_foreign_access_ref(ref,
+                                       queue->info->xbdev->otherend_id,
+                                       xen_page_to_gfn(page),
+                                       GNTMAP_readonly);
  
        queue->tx_skbs[id].skb = skb;
        queue->grant_tx_page[id] = page;
@@@ -1336,7 -1338,7 +1338,7 @@@ static void xennet_disconnect_backend(s
  
        netif_carrier_off(info->netdev);
  
 -      for (i = 0; i < num_queues; ++i) {
 +      for (i = 0; i < num_queues && info->queues; ++i) {
                struct netfront_queue *queue = &info->queues[i];
  
                if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
                queue->tx_evtchn = queue->rx_evtchn = 0;
                queue->tx_irq = queue->rx_irq = 0;
  
 -              napi_synchronize(&queue->napi);
 +              if (netif_running(info->netdev))
 +                      napi_synchronize(&queue->napi);
  
                xennet_release_tx_bufs(queue);
                xennet_release_rx_bufs(queue);
@@@ -2102,8 -2103,7 +2104,8 @@@ static int xennet_remove(struct xenbus_
  
        unregister_netdev(info->netdev);
  
 -      xennet_destroy_queues(info);
 +      if (info->queues)
 +              xennet_destroy_queues(info);
        xennet_free_netdev(info->netdev);
  
        return 0;
@@@ -2132,11 -2132,8 +2134,11 @@@ static int __init netif_init(void
  
        pr_info("Initialising Xen virtual ethernet driver\n");
  
 -      /* Allow as many queues as there are CPUs, by default */
 -      xennet_max_queues = num_online_cpus();
 +      /* Allow as many queues as there are CPUs if user has not
 +       * specified a value.
 +       */
 +      if (xennet_max_queues == 0)
 +              xennet_max_queues = num_online_cpus();
  
        return xenbus_register_frontend(&netfront_driver);
  }
@@@ -336,7 -336,7 +336,7 @@@ static void bind_evtchn_to_cpu(unsigne
  
        BUG_ON(irq == -1);
  #ifdef CONFIG_SMP
 -      cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu));
 +      cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
  #endif
        xen_evtchn_port_bind_to_cpu(info, cpu);
  
@@@ -373,7 -373,7 +373,7 @@@ static void xen_irq_init(unsigned irq
        struct irq_info *info;
  #ifdef CONFIG_SMP
        /* By default all event channels notify CPU#0. */
 -      cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(0));
 +      cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
  #endif
  
        info = kzalloc(sizeof(*info), GFP_KERNEL);
@@@ -1688,7 -1688,7 +1688,7 @@@ void __init xen_init_IRQ(void
                struct physdev_pirq_eoi_gmfn eoi_gmfn;
  
                pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-               eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
+               eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
                rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
                /* TODO: No PVH support for PIRQ EOI */
                if (rc != 0) {