Merge branch 'for-linus-dma-masks' of git://git.linaro.org/people/rmk/linux-arm
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 13 Nov 2013 22:55:21 +0000 (07:55 +0900)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 13 Nov 2013 22:55:21 +0000 (07:55 +0900)
Pull DMA mask updates from Russell King:
 "This series cleans up the handling of DMA masks in a lot of drivers,
  fixing some bugs as we go.

  Some of the more serious errors include:
   - drivers which only set their coherent DMA mask if the attempt to
     set the streaming mask fails.
   - drivers which test for a NULL dma mask pointer, and then set the
     dma mask pointer to a location in their module .data section -
     which will cause problems if the module is reloaded.

  To counter these, I have introduced two helper functions:
   - dma_set_mask_and_coherent() takes care of setting both the
     streaming and coherent masks at the same time, with the correct
     error handling as specified by the API.
   - dma_coerce_mask_and_coherent() which resolves the problem of
     drivers forcefully setting DMA masks.  This is more a marker for
     future work to further clean these locations up - the code which
     creates the devices really should be initialising these, but to fix
     that in one go along with this change could potentially be very
     disruptive.

  The last thing this series does is prise away some of Linux's addition
  to "DMA addresses are physical addresses and RAM always starts at
  zero".  We have ARM LPAE systems where all system memory is above 4GB
  physical, hence having DMA masks interpreted by (eg) the block layers
  as describing physical addresses in the range 0..DMAMASK fails on
  these platforms.  Santosh Shilimkar addresses this in this series; the
  patches were copied to the appropriate people multiple times but were
  ignored.

  Fixing this also gets rid of some ARM weirdness in the setup of the
  max*pfn variables, and brings ARM into line with every other Linux
  architecture as far as those go"

* 'for-linus-dma-masks' of git://git.linaro.org/people/rmk/linux-arm: (52 commits)
  ARM: 7805/1: mm: change max*pfn to include the physical offset of memory
  ARM: 7797/1: mmc: Use dma_max_pfn(dev) helper for bounce_limit calculations
  ARM: 7796/1: scsi: Use dma_max_pfn(dev) helper for bounce_limit calculations
  ARM: 7795/1: mm: dma-mapping: Add dma_max_pfn(dev) helper function
  ARM: 7794/1: block: Rename parameter dma_mask to max_addr for blk_queue_bounce_limit()
  ARM: DMA-API: better handling of DMA masks for coherent allocations
  ARM: 7857/1: dma: imx-sdma: setup dma mask
  DMA-API: firmware/google/gsmi.c: avoid direct access to DMA masks
  DMA-API: dcdbas: update DMA mask handling
  DMA-API: dma: edma.c: no need to explicitly initialize DMA masks
  DMA-API: usb: musb: use platform_device_register_full() to avoid directly messing with dma masks
  DMA-API: crypto: remove last references to 'static struct device *dev'
  DMA-API: crypto: fix ixp4xx crypto platform device support
  DMA-API: others: use dma_set_coherent_mask()
  DMA-API: staging: use dma_set_coherent_mask()
  DMA-API: usb: use new dma_coerce_mask_and_coherent()
  DMA-API: usb: use dma_set_coherent_mask()
  DMA-API: parport: parport_pc.c: use dma_coerce_mask_and_coherent()
  DMA-API: net: octeon: use dma_coerce_mask_and_coherent()
  DMA-API: net: nxp/lpc_eth: use dma_coerce_mask_and_coherent()
  ...

38 files changed:
1  2 
arch/arm/mm/dma-mapping.c
arch/arm/mm/init.c
arch/powerpc/kernel/vio.c
drivers/dma/edma.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/octeon/octeon_mgmt.c
drivers/of/platform.c
drivers/staging/dwc2/platform.c
drivers/staging/et131x/et131x.c
drivers/staging/imx-drm/imx-drm-core.c
drivers/staging/imx-drm/ipuv3-crtc.c
drivers/usb/chipidea/ci_hdrc_imx.c
drivers/usb/host/ehci-atmel.c
drivers/usb/host/ehci-exynos.c
drivers/usb/host/ehci-tegra.c
drivers/usb/host/ohci-at91.c
drivers/usb/host/ohci-exynos.c
drivers/usb/host/ohci-nxp.c
drivers/usb/host/ohci-omap3.c
drivers/usb/host/ohci-pxa27x.c
drivers/usb/host/ohci-spear.c
drivers/usb/host/uhci-platform.c
sound/soc/atmel/atmel-pcm.c
sound/soc/davinci/davinci-pcm.c
sound/soc/fsl/fsl_dma.c
sound/soc/fsl/imx-pcm-fiq.c
sound/soc/fsl/mpc5200_dma.c
sound/soc/kirkwood/kirkwood-dma.c
sound/soc/s6000/s6000-pcm.c

@@@ -159,7 -159,7 +159,7 @@@ EXPORT_SYMBOL(arm_coherent_dma_ops)
  
  static u64 get_coherent_dma_mask(struct device *dev)
  {
-       u64 mask = (u64)arm_dma_limit;
+       u64 mask = (u64)DMA_BIT_MASK(32);
  
        if (dev) {
                mask = dev->coherent_dma_mask;
                        return 0;
                }
  
-               if ((~mask) & (u64)arm_dma_limit) {
-                       dev_warn(dev, "coherent DMA mask %#llx is smaller "
-                                "than system GFP_DMA mask %#llx\n",
-                                mask, (u64)arm_dma_limit);
+               /*
+                * If the mask allows for more memory than we can address,
+                * and we actually have that much memory, then fail the
+                * allocation.
+                */
+               if (sizeof(mask) != sizeof(dma_addr_t) &&
+                   mask > (dma_addr_t)~0 &&
+                   dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
+                       dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+                                mask);
+                       dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+                       return 0;
+               }
+               /*
+                * Now check that the mask, when translated to a PFN,
+                * fits within the allowable addresses which we can
+                * allocate.
+                */
+               if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
+                       dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+                                mask,
+                                dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+                                arm_dma_pfn_limit + 1);
                        return 0;
                }
        }
@@@ -1007,8 -1027,27 +1027,27 @@@ void arm_dma_sync_sg_for_device(struct 
   */
  int dma_supported(struct device *dev, u64 mask)
  {
-       if (mask < (u64)arm_dma_limit)
+       unsigned long limit;
+       /*
+        * If the mask allows for more memory than we can address,
+        * and we actually have that much memory, then we must
+        * indicate that DMA to this device is not supported.
+        */
+       if (sizeof(mask) != sizeof(dma_addr_t) &&
+           mask > (dma_addr_t)~0 &&
+           dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
+               return 0;
+       /*
+        * Translate the device's DMA mask to a PFN limit.  This
+        * PFN number includes the page which we can DMA to.
+        */
+       limit = dma_to_pfn(dev, mask);
+       if (limit < arm_dma_pfn_limit)
                return 0;
        return 1;
  }
  EXPORT_SYMBOL(dma_supported);
@@@ -1232,8 -1271,7 +1271,8 @@@ __iommu_create_mapping(struct device *d
                                break;
  
                len = (j - i) << PAGE_SHIFT;
 -              ret = iommu_map(mapping->domain, iova, phys, len, 0);
 +              ret = iommu_map(mapping->domain, iova, phys, len,
 +                              IOMMU_READ|IOMMU_WRITE);
                if (ret < 0)
                        goto fail;
                iova += len;
@@@ -1432,27 -1470,6 +1471,27 @@@ static int arm_iommu_get_sgtable(struc
                                         GFP_KERNEL);
  }
  
 +static int __dma_direction_to_prot(enum dma_data_direction dir)
 +{
 +      int prot;
 +
 +      switch (dir) {
 +      case DMA_BIDIRECTIONAL:
 +              prot = IOMMU_READ | IOMMU_WRITE;
 +              break;
 +      case DMA_TO_DEVICE:
 +              prot = IOMMU_READ;
 +              break;
 +      case DMA_FROM_DEVICE:
 +              prot = IOMMU_WRITE;
 +              break;
 +      default:
 +              prot = 0;
 +      }
 +
 +      return prot;
 +}
 +
  /*
   * Map a part of the scatter-gather list into contiguous io address space
   */
@@@ -1466,7 -1483,6 +1505,7 @@@ static int __map_sg_chunk(struct devic
        int ret = 0;
        unsigned int count;
        struct scatterlist *s;
 +      int prot;
  
        size = PAGE_ALIGN(size);
        *handle = DMA_ERROR_CODE;
                        !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                        __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
  
 -              ret = iommu_map(mapping->domain, iova, phys, len, 0);
 +              prot = __dma_direction_to_prot(dir);
 +
 +              ret = iommu_map(mapping->domain, iova, phys, len, prot);
                if (ret < 0)
                        goto fail;
                count += len >> PAGE_SHIFT;
@@@ -1690,7 -1704,19 +1729,7 @@@ static dma_addr_t arm_coherent_iommu_ma
        if (dma_addr == DMA_ERROR_CODE)
                return dma_addr;
  
 -      switch (dir) {
 -      case DMA_BIDIRECTIONAL:
 -              prot = IOMMU_READ | IOMMU_WRITE;
 -              break;
 -      case DMA_TO_DEVICE:
 -              prot = IOMMU_READ;
 -              break;
 -      case DMA_FROM_DEVICE:
 -              prot = IOMMU_WRITE;
 -              break;
 -      default:
 -              prot = 0;
 -      }
 +      prot = __dma_direction_to_prot(dir);
  
        ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
        if (ret < 0)
diff --combined arch/arm/mm/init.c
@@@ -17,6 -17,7 +17,6 @@@
  #include <linux/nodemask.h>
  #include <linux/initrd.h>
  #include <linux/of_fdt.h>
 -#include <linux/of_reserved_mem.h>
  #include <linux/highmem.h>
  #include <linux/gfp.h>
  #include <linux/memblock.h>
@@@ -76,6 -77,14 +76,6 @@@ static int __init parse_tag_initrd2(con
  
  __tagtable(ATAG_INITRD2, parse_tag_initrd2);
  
 -#ifdef CONFIG_OF_FLATTREE
 -void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
 -{
 -      phys_initrd_start = start;
 -      phys_initrd_size = end - start;
 -}
 -#endif /* CONFIG_OF_FLATTREE */
 -
  /*
   * This keeps memory configuration data used by a couple memory
   * initialization functions, as well as show_mem() for the skipping
@@@ -209,6 -218,7 +209,7 @@@ EXPORT_SYMBOL(arm_dma_zone_size)
   * so a successful GFP_DMA allocation will always satisfy this.
   */
  phys_addr_t arm_dma_limit;
+ unsigned long arm_dma_pfn_limit;
  
  static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
@@@ -231,6 -241,7 +232,7 @@@ void __init setup_dma_zone(const struc
                arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
        } else
                arm_dma_limit = 0xffffffff;
+       arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
  #endif
  }
  
@@@ -342,11 -353,6 +344,11 @@@ void __init arm_memblock_init(struct me
        memblock_reserve(__pa(_stext), _end - _stext);
  #endif
  #ifdef CONFIG_BLK_DEV_INITRD
 +      /* FDT scan will populate initrd_start */
 +      if (initrd_start) {
 +              phys_initrd_start = __virt_to_phys(initrd_start);
 +              phys_initrd_size = initrd_end - initrd_start;
 +      }
        if (phys_initrd_size &&
            !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
                pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
        if (mdesc->reserve)
                mdesc->reserve();
  
 -      early_init_dt_scan_reserved_mem();
 -
        /*
         * reserve memory for DMA contigouos allocations,
         * must come from DMA area inside low memory
@@@ -418,12 -426,10 +420,10 @@@ void __init bootmem_init(void
         * This doesn't seem to be used by the Linux memory manager any
         * more, but is used by ll_rw_block.  If we can get rid of it, we
         * also get rid of some of the stuff above as well.
-        *
-        * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
-        * the system, not the maximum PFN.
         */
-       max_low_pfn = max_low - PHYS_PFN_OFFSET;
-       max_pfn = max_high - PHYS_PFN_OFFSET;
+       min_low_pfn = min;
+       max_low_pfn = max_low;
+       max_pfn = max_high;
  }
  
  /*
@@@ -529,7 -535,7 +529,7 @@@ static inline void free_area_high(unsig
  static void __init free_highpages(void)
  {
  #ifdef CONFIG_HIGHMEM
-       unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+       unsigned long max_low = max_low_pfn;
        struct memblock_region *mem, *res;
  
        /* set highmem page free */
@@@ -997,36 -997,21 +997,36 @@@ static struct device_attribute vio_cmo_
  /* sysfs bus functions and data structures for CMO */
  
  #define viobus_cmo_rd_attr(name)                                        \
 -static ssize_t                                                          \
 -viobus_cmo_##name##_show(struct bus_type *bt, char *buf)                \
 +static ssize_t cmo_##name##_show(struct bus_type *bt, char *buf)        \
  {                                                                       \
        return sprintf(buf, "%lu\n", vio_cmo.name);                     \
 -}
 +}                                                                       \
 +static BUS_ATTR_RO(cmo_##name)
  
  #define viobus_cmo_pool_rd_attr(name, var)                              \
  static ssize_t                                                          \
 -viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf)     \
 +cmo_##name##_##var##_show(struct bus_type *bt, char *buf)               \
  {                                                                       \
        return sprintf(buf, "%lu\n", vio_cmo.name.var);                 \
 +}                                                                       \
 +static BUS_ATTR_RO(cmo_##name##_##var)
 +
 +viobus_cmo_rd_attr(entitled);
 +viobus_cmo_rd_attr(spare);
 +viobus_cmo_rd_attr(min);
 +viobus_cmo_rd_attr(desired);
 +viobus_cmo_rd_attr(curr);
 +viobus_cmo_pool_rd_attr(reserve, size);
 +viobus_cmo_pool_rd_attr(excess, size);
 +viobus_cmo_pool_rd_attr(excess, free);
 +
 +static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
 +{
 +      return sprintf(buf, "%lu\n", vio_cmo.high);
  }
  
 -static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
 -                                     size_t count)
 +static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
 +                            size_t count)
  {
        unsigned long flags;
  
  
        return count;
  }
 -
 -viobus_cmo_rd_attr(entitled);
 -viobus_cmo_pool_rd_attr(reserve, size);
 -viobus_cmo_pool_rd_attr(excess, size);
 -viobus_cmo_pool_rd_attr(excess, free);
 -viobus_cmo_rd_attr(spare);
 -viobus_cmo_rd_attr(min);
 -viobus_cmo_rd_attr(desired);
 -viobus_cmo_rd_attr(curr);
 -viobus_cmo_rd_attr(high);
 -
 -static struct bus_attribute vio_cmo_bus_attrs[] = {
 -      __ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL),
 -      __ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
 -      __ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
 -      __ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
 -      __ATTR(cmo_spare,   S_IRUGO, viobus_cmo_spare_show,   NULL),
 -      __ATTR(cmo_min,     S_IRUGO, viobus_cmo_min_show,     NULL),
 -      __ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
 -      __ATTR(cmo_curr,    S_IRUGO, viobus_cmo_curr_show,    NULL),
 -      __ATTR(cmo_high,    S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
 -             viobus_cmo_high_show, viobus_cmo_high_reset),
 -      __ATTR_NULL
 +static BUS_ATTR_RW(cmo_high);
 +
 +static struct attribute *vio_bus_attrs[] = {
 +      &bus_attr_cmo_entitled.attr,
 +      &bus_attr_cmo_spare.attr,
 +      &bus_attr_cmo_min.attr,
 +      &bus_attr_cmo_desired.attr,
 +      &bus_attr_cmo_curr.attr,
 +      &bus_attr_cmo_high.attr,
 +      &bus_attr_cmo_reserve_size.attr,
 +      &bus_attr_cmo_excess_size.attr,
 +      &bus_attr_cmo_excess_free.attr,
 +      NULL,
  };
 +ATTRIBUTE_GROUPS(vio_bus);
  
  static void vio_cmo_sysfs_init(void)
  {
        vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
 -      vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
 +      vio_bus_type.bus_groups = vio_bus_groups;
  }
  #else /* CONFIG_PPC_SMLPAR */
  int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
@@@ -1419,8 -1413,7 +1419,7 @@@ struct vio_dev *vio_register_device_nod
  
                /* needed to ensure proper operation of coherent allocations
                 * later, in case driver doesn't set it explicitly */
-               dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
-               dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
+               dma_set_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64));
        }
  
        /* register with generic device framework */
@@@ -1536,15 -1529,11 +1535,15 @@@ static ssize_t modalias_show(struct dev
        const char *cp;
  
        dn = dev->of_node;
 -      if (!dn)
 -              return -ENODEV;
 +      if (!dn) {
 +              strcpy(buf, "\n");
 +              return strlen(buf);
 +      }
        cp = of_get_property(dn, "compatible", NULL);
 -      if (!cp)
 -              return -ENODEV;
 +      if (!cp) {
 +              strcpy(buf, "\n");
 +              return strlen(buf);
 +      }
  
        return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
  }
diff --combined drivers/dma/edma.c
@@@ -305,9 -305,7 +305,9 @@@ static struct dma_async_tx_descriptor *
                                edma_alloc_slot(EDMA_CTLR(echan->ch_num),
                                                EDMA_SLOT_ANY);
                        if (echan->slot[i] < 0) {
 +                              kfree(edesc);
                                dev_err(dev, "Failed to allocate slot\n");
 +                              kfree(edesc);
                                return NULL;
                        }
                }
                        ccnt = sg_dma_len(sg) / (acnt * bcnt);
                        if (ccnt > (SZ_64K - 1)) {
                                dev_err(dev, "Exceeded max SG segment size\n");
 +                              kfree(edesc);
                                return NULL;
                        }
                        cidx = acnt * bcnt;
@@@ -634,6 -631,10 +634,10 @@@ static int edma_probe(struct platform_d
        struct edma_cc *ecc;
        int ret;
  
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
        ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
        if (!ecc) {
                dev_err(&pdev->dev, "Can't allocate controller\n");
@@@ -705,11 -706,13 +709,13 @@@ static struct platform_device *pdev0, *
  static const struct platform_device_info edma_dev_info0 = {
        .name = "edma-dma-engine",
        .id = 0,
+       .dma_mask = DMA_BIT_MASK(32),
  };
  
  static const struct platform_device_info edma_dev_info1 = {
        .name = "edma-dma-engine",
        .id = 1,
+       .dma_mask = DMA_BIT_MASK(32),
  };
  
  static int edma_init(void)
                        ret = PTR_ERR(pdev0);
                        goto out;
                }
-               pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
-               pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        }
  
        if (EDMA_CTLRS == 2) {
                        platform_device_unregister(pdev0);
                        ret = PTR_ERR(pdev1);
                }
-               pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
-               pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        }
  
  out:
@@@ -752,6 -751,6 +754,6 @@@ static void __exit edma_exit(void
  }
  module_exit(edma_exit);
  
 -MODULE_AUTHOR("Matt Porter <mporter@ti.com>");
 +MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
  MODULE_DESCRIPTION("TI EDMA DMA engine driver");
  MODULE_LICENSE("GPL v2");
@@@ -596,7 -596,6 +596,7 @@@ static void b44_timer(unsigned long __o
  static void b44_tx(struct b44 *bp)
  {
        u32 cur, cons;
 +      unsigned bytes_compl = 0, pkts_compl = 0;
  
        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);
                                 skb->len,
                                 DMA_TO_DEVICE);
                rp->skb = NULL;
 +
 +              bytes_compl += skb->len;
 +              pkts_compl++;
 +
                dev_kfree_skb_irq(skb);
        }
  
 +      netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
@@@ -1024,8 -1018,6 +1024,8 @@@ static netdev_tx_t b44_start_xmit(struc
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);
  
 +      netdev_sent_queue(dev, skb->len);
 +
        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);
  
@@@ -1424,8 -1416,6 +1424,8 @@@ static void b44_init_hw(struct b44 *bp
  
        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
 +
 +      netdev_reset_queue(bp->dev);
  }
  
  static int b44_open(struct net_device *dev)
@@@ -2111,7 -2101,7 +2111,7 @@@ static int b44_get_invariants(struct b4
         * valid PHY address. */
        bp->phy_addr &= 0x1F;
  
 -      memcpy(bp->dev->dev_addr, addr, 6);
 +      memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
  
        if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
                pr_err("Invalid MAC address found in EEPROM\n");
@@@ -2193,8 -2183,7 +2193,7 @@@ static int b44_init_one(struct ssb_devi
                goto err_out_free_dev;
        }
  
-       if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
-           dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
+       if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
                dev_err(sdev->dev,
                        "Required 30BIT DMA mask unsupported by the system\n");
                goto err_out_powerdown;
@@@ -503,9 -503,9 +503,9 @@@ void bnx2x_prep_dmae_with_comp(struct b
  }
  
  /* issue a dmae command over the init-channel and wait for completion */
 -int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
 +int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
 +                             u32 *comp)
  {
 -      u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
        int rc = 0;
  
        spin_lock_bh(&bp->dmae_lock);
  
        /* reset completion */
 -      *wb_comp = 0;
 +      *comp = 0;
  
        /* post the command on the channel used for initializations */
        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
  
        /* wait for completion */
        udelay(5);
 -      while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
 +      while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
  
                if (!cnt ||
                    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
                cnt--;
                udelay(50);
        }
 -      if (*wb_comp & DMAE_PCI_ERR_FLAG) {
 +      if (*comp & DMAE_PCI_ERR_FLAG) {
                BNX2X_ERR("DMAE PCI error!\n");
                rc = DMAE_PCI_ERROR;
        }
@@@ -574,7 -574,7 +574,7 @@@ void bnx2x_write_dmae(struct bnx2x *bp
        dmae.len = len32;
  
        /* issue the command and wait for completion */
 -      rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
 +      rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
                bnx2x_panic();
@@@ -611,7 -611,7 +611,7 @@@ void bnx2x_read_dmae(struct bnx2x *bp, 
        dmae.len = len32;
  
        /* issue the command and wait for completion */
 -      rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
 +      rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
                bnx2x_panic();
@@@ -751,10 -751,6 +751,10 @@@ static int bnx2x_mc_assert(struct bnx2
        return rc;
  }
  
 +#define MCPR_TRACE_BUFFER_SIZE        (0x800)
 +#define SCRATCH_BUFFER_SIZE(bp)       \
 +      (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
 +
  void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
  {
        u32 addr, val;
                trace_shmem_base = bp->common.shmem_base;
        else
                trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
 -      addr = trace_shmem_base - 0x800;
 +
 +      /* sanity */
 +      if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
 +          trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
 +                              SCRATCH_BUFFER_SIZE(bp)) {
 +              BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
 +                        trace_shmem_base);
 +              return;
 +      }
 +
 +      addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
  
        /* validate TRCB signature */
        mark = REG_RD(bp, addr);
        /* read cyclic buffer pointer */
        addr += 4;
        mark = REG_RD(bp, addr);
 -      mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
 -                      + ((mark + 0x3) & ~0x3) - 0x08000000;
 +      mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
 +      if (mark >= trace_shmem_base || mark < addr + 4) {
 +              BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
 +              return;
 +      }
        printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
  
        printk("%s", lvl);
  
        /* dump buffer after the mark */
 -      for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
 +      for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
@@@ -4297,60 -4280,65 +4297,60 @@@ static void _print_next_block(int idx, 
        pr_cont("%s%s", idx ? ", " : "", blk);
  }
  
 -static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
 -                                          int par_num, bool print)
 +static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
 +                                          int *par_num, bool print)
  {
 -      int i = 0;
 -      u32 cur_bit = 0;
 +      u32 cur_bit;
 +      bool res;
 +      int i;
 +
 +      res = false;
 +
        for (i = 0; sig; i++) {
 -              cur_bit = ((u32)0x1 << i);
 +              cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
 -                      switch (cur_bit) {
 -                      case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
 -                              if (print) {
 -                                      _print_next_block(par_num++, "BRB");
 +                      res |= true; /* Each bit is real error! */
 +
 +                      if (print) {
 +                              switch (cur_bit) {
 +                              case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++, "BRB");
                                        _print_parity(bp,
                                                      BRB1_REG_BRB1_PRTY_STS);
 -                              }
 -                              break;
 -                      case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
 -                              if (print) {
 -                                      _print_next_block(par_num++, "PARSER");
 +                                      break;
 +                              case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++,
 +                                                        "PARSER");
                                        _print_parity(bp, PRS_REG_PRS_PRTY_STS);
 -                              }
 -                              break;
 -                      case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
 -                              if (print) {
 -                                      _print_next_block(par_num++, "TSDM");
 +                                      break;
 +                              case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++, "TSDM");
                                        _print_parity(bp,
                                                      TSDM_REG_TSDM_PRTY_STS);
 -                              }
 -                              break;
 -                      case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
 -                              if (print) {
 -                                      _print_next_block(par_num++,
 +                                      break;
 +                              case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++,
                                                          "SEARCHER");
                                        _print_parity(bp, SRC_REG_SRC_PRTY_STS);
 -                              }
 -                              break;
 -                      case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
 -                              if (print) {
 -                                      _print_next_block(par_num++, "TCM");
 -                                      _print_parity(bp,
 -                                                    TCM_REG_TCM_PRTY_STS);
 -                              }
 -                              break;
 -                      case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
 -                              if (print) {
 -                                      _print_next_block(par_num++, "TSEMI");
 +                                      break;
 +                              case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++, "TCM");
 +                                      _print_parity(bp, TCM_REG_TCM_PRTY_STS);
 +                                      break;
 +                              case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++,
 +                                                        "TSEMI");
                                        _print_parity(bp,
                                                      TSEM_REG_TSEM_PRTY_STS_0);
                                        _print_parity(bp,
                                                      TSEM_REG_TSEM_PRTY_STS_1);
 -                              }
 -                              break;
 -                      case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
 -                              if (print) {
 -                                      _print_next_block(par_num++, "XPB");
 +                                      break;
 +                              case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++, "XPB");
                                        _print_parity(bp, GRCBASE_XPB +
                                                          PB_REG_PB_PRTY_STS);
 +                                      break;
                                }
 -                              break;
                        }
  
                        /* Clear the bit */
                }
        }
  
 -      return par_num;
 +      return res;
  }
  
 -static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
 -                                          int par_num, bool *global,
 +static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
 +                                          int *par_num, bool *global,
                                            bool print)
  {
 -      int i = 0;
 -      u32 cur_bit = 0;
 +      u32 cur_bit;
 +      bool res;
 +      int i;
 +
 +      res = false;
 +
        for (i = 0; sig; i++) {
 -              cur_bit = ((u32)0x1 << i);
 +              cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
 +                      res |= true; /* Each bit is real error! */
                        switch (cur_bit) {
                        case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
                                if (print) {
 -                                      _print_next_block(par_num++, "PBF");
 +                                      _print_next_block((*par_num)++, "PBF");
                                        _print_parity(bp, PBF_REG_PBF_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
                                if (print) {
 -                                      _print_next_block(par_num++, "QM");
 +                                      _print_next_block((*par_num)++, "QM");
                                        _print_parity(bp, QM_REG_QM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
                                if (print) {
 -                                      _print_next_block(par_num++, "TM");
 +                                      _print_next_block((*par_num)++, "TM");
                                        _print_parity(bp, TM_REG_TM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
                                if (print) {
 -                                      _print_next_block(par_num++, "XSDM");
 +                                      _print_next_block((*par_num)++, "XSDM");
                                        _print_parity(bp,
                                                      XSDM_REG_XSDM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
                                if (print) {
 -                                      _print_next_block(par_num++, "XCM");
 +                                      _print_next_block((*par_num)++, "XCM");
                                        _print_parity(bp, XCM_REG_XCM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
                                if (print) {
 -                                      _print_next_block(par_num++, "XSEMI");
 +                                      _print_next_block((*par_num)++,
 +                                                        "XSEMI");
                                        _print_parity(bp,
                                                      XSEM_REG_XSEM_PRTY_STS_0);
                                        _print_parity(bp,
                                break;
                        case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
                                if (print) {
 -                                      _print_next_block(par_num++,
 +                                      _print_next_block((*par_num)++,
                                                          "DOORBELLQ");
                                        _print_parity(bp,
                                                      DORQ_REG_DORQ_PRTY_STS);
                                break;
                        case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
                                if (print) {
 -                                      _print_next_block(par_num++, "NIG");
 +                                      _print_next_block((*par_num)++, "NIG");
                                        if (CHIP_IS_E1x(bp)) {
                                                _print_parity(bp,
                                                        NIG_REG_NIG_PRTY_STS);
                                break;
                        case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
                                if (print)
 -                                      _print_next_block(par_num++,
 +                                      _print_next_block((*par_num)++,
                                                          "VAUX PCI CORE");
                                *global = true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
                                if (print) {
 -                                      _print_next_block(par_num++, "DEBUG");
 +                                      _print_next_block((*par_num)++,
 +                                                        "DEBUG");
                                        _print_parity(bp, DBG_REG_DBG_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
                                if (print) {
 -                                      _print_next_block(par_num++, "USDM");
 +                                      _print_next_block((*par_num)++, "USDM");
                                        _print_parity(bp,
                                                      USDM_REG_USDM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
                                if (print) {
 -                                      _print_next_block(par_num++, "UCM");
 +                                      _print_next_block((*par_num)++, "UCM");
                                        _print_parity(bp, UCM_REG_UCM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
                                if (print) {
 -                                      _print_next_block(par_num++, "USEMI");
 +                                      _print_next_block((*par_num)++,
 +                                                        "USEMI");
                                        _print_parity(bp,
                                                      USEM_REG_USEM_PRTY_STS_0);
                                        _print_parity(bp,
                                break;
                        case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
                                if (print) {
 -                                      _print_next_block(par_num++, "UPB");
 +                                      _print_next_block((*par_num)++, "UPB");
                                        _print_parity(bp, GRCBASE_UPB +
                                                          PB_REG_PB_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
                                if (print) {
 -                                      _print_next_block(par_num++, "CSDM");
 +                                      _print_next_block((*par_num)++, "CSDM");
                                        _print_parity(bp,
                                                      CSDM_REG_CSDM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
                                if (print) {
 -                                      _print_next_block(par_num++, "CCM");
 +                                      _print_next_block((*par_num)++, "CCM");
                                        _print_parity(bp, CCM_REG_CCM_PRTY_STS);
                                }
                                break;
                }
        }
  
 -      return par_num;
 +      return res;
  }
  
 -static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
 -                                          int par_num, bool print)
 +static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
 +                                          int *par_num, bool print)
  {
 -      int i = 0;
 -      u32 cur_bit = 0;
 +      u32 cur_bit;
 +      bool res;
 +      int i;
 +
 +      res = false;
 +
        for (i = 0; sig; i++) {
 -              cur_bit = ((u32)0x1 << i);
 +              cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
 -                      switch (cur_bit) {
 -                      case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
 -                              if (print) {
 -                                      _print_next_block(par_num++, "CSEMI");
 +                      res |= true; /* Each bit is real error! */
 +                      if (print) {
 +                              switch (cur_bit) {
 +                              case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++,
 +                                                        "CSEMI");
                                        _print_parity(bp,
                                                      CSEM_REG_CSEM_PRTY_STS_0);
                                        _print_parity(bp,
                                                      CSEM_REG_CSEM_PRTY_STS_1);
 -                              }
 -                              break;
 -                      case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
 -                              if (print) {
 -                                      _print_next_block(par_num++, "PXP");
 +                                      break;
 +                              case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++, "PXP");
                                        _print_parity(bp, PXP_REG_PXP_PRTY_STS);
                                        _print_parity(bp,
                                                      PXP2_REG_PXP2_PRTY_STS_0);
                                        _print_parity(bp,
                                                      PXP2_REG_PXP2_PRTY_STS_1);
 -                              }
 -                              break;
 -                      case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
 -                              if (print)
 -                                      _print_next_block(par_num++,
 -                                      "PXPPCICLOCKCLIENT");
 -                              break;
 -                      case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
 -                              if (print) {
 -                                      _print_next_block(par_num++, "CFC");
 +                                      break;
 +                              case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++,
 +                                                        "PXPPCICLOCKCLIENT");
 +                                      break;
 +                              case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++, "CFC");
                                        _print_parity(bp,
                                                      CFC_REG_CFC_PRTY_STS);
 -                              }
 -                              break;
 -                      case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
 -                              if (print) {
 -                                      _print_next_block(par_num++, "CDU");
 +                                      break;
 +                              case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++, "CDU");
                                        _print_parity(bp, CDU_REG_CDU_PRTY_STS);
 -                              }
 -                              break;
 -                      case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
 -                              if (print) {
 -                                      _print_next_block(par_num++, "DMAE");
 +                                      break;
 +                              case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++, "DMAE");
                                        _print_parity(bp,
                                                      DMAE_REG_DMAE_PRTY_STS);
 -                              }
 -                              break;
 -                      case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
 -                              if (print) {
 -                                      _print_next_block(par_num++, "IGU");
 +                                      break;
 +                              case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++, "IGU");
                                        if (CHIP_IS_E1x(bp))
                                                _print_parity(bp,
                                                        HC_REG_HC_PRTY_STS);
                                        else
                                                _print_parity(bp,
                                                        IGU_REG_IGU_PRTY_STS);
 -                              }
 -                              break;
 -                      case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
 -                              if (print) {
 -                                      _print_next_block(par_num++, "MISC");
 +                                      break;
 +                              case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++, "MISC");
                                        _print_parity(bp,
                                                      MISC_REG_MISC_PRTY_STS);
 +                                      break;
                                }
 -                              break;
                        }
  
                        /* Clear the bit */
                }
        }
  
 -      return par_num;
 +      return res;
  }
  
 -static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
 -                                         bool *global, bool print)
 +static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
 +                                          int *par_num, bool *global,
 +                                          bool print)
  {
 -      int i = 0;
 -      u32 cur_bit = 0;
 +      bool res = false;
 +      u32 cur_bit;
 +      int i;
 +
        for (i = 0; sig; i++) {
 -              cur_bit = ((u32)0x1 << i);
 +              cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
                        switch (cur_bit) {
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
                                if (print)
 -                                      _print_next_block(par_num++, "MCP ROM");
 +                                      _print_next_block((*par_num)++,
 +                                                        "MCP ROM");
                                *global = true;
 +                              res |= true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
                                if (print)
 -                                      _print_next_block(par_num++,
 +                                      _print_next_block((*par_num)++,
                                                          "MCP UMP RX");
                                *global = true;
 +                              res |= true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
                                if (print)
 -                                      _print_next_block(par_num++,
 +                                      _print_next_block((*par_num)++,
                                                          "MCP UMP TX");
                                *global = true;
 +                              res |= true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
                                if (print)
 -                                      _print_next_block(par_num++,
 +                                      _print_next_block((*par_num)++,
                                                          "MCP SCPAD");
 -                              *global = true;
 +                              /* clear latched SCPAD PARITY from MCP */
 +                              REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
 +                                     1UL << 10);
                                break;
                        }
  
                }
        }
  
 -      return par_num;
 +      return res;
  }
  
 -static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
 -                                          int par_num, bool print)
 +static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
 +                                          int *par_num, bool print)
  {
 -      int i = 0;
 -      u32 cur_bit = 0;
 +      u32 cur_bit;
 +      bool res;
 +      int i;
 +
 +      res = false;
 +
        for (i = 0; sig; i++) {
 -              cur_bit = ((u32)0x1 << i);
 +              cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
 -                      switch (cur_bit) {
 -                      case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
 -                              if (print) {
 -                                      _print_next_block(par_num++, "PGLUE_B");
 +                      res |= true; /* Each bit is real error! */
 +                      if (print) {
 +                              switch (cur_bit) {
 +                              case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++,
 +                                                        "PGLUE_B");
                                        _print_parity(bp,
 -                                              PGLUE_B_REG_PGLUE_B_PRTY_STS);
 -                              }
 -                              break;
 -                      case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
 -                              if (print) {
 -                                      _print_next_block(par_num++, "ATC");
 +                                                    PGLUE_B_REG_PGLUE_B_PRTY_STS);
 +                                      break;
 +                              case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
 +                                      _print_next_block((*par_num)++, "ATC");
                                        _print_parity(bp,
                                                      ATC_REG_ATC_PRTY_STS);
 +                                      break;
                                }
 -                              break;
                        }
 -
                        /* Clear the bit */
                        sig &= ~cur_bit;
                }
        }
  
 -      return par_num;
 +      return res;
  }
  
  static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
                              u32 *sig)
  {
 +      bool res = false;
 +
        if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
            (sig[1] & HW_PRTY_ASSERT_SET_1) ||
            (sig[2] & HW_PRTY_ASSERT_SET_2) ||
                if (print)
                        netdev_err(bp->dev,
                                   "Parity errors detected in blocks: ");
 -              par_num = bnx2x_check_blocks_with_parity0(bp,
 -                      sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
 -              par_num = bnx2x_check_blocks_with_parity1(bp,
 -                      sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
 -              par_num = bnx2x_check_blocks_with_parity2(bp,
 -                      sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
 -              par_num = bnx2x_check_blocks_with_parity3(
 -                      sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
 -              par_num = bnx2x_check_blocks_with_parity4(bp,
 -                      sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
 +              res |= bnx2x_check_blocks_with_parity0(bp,
 +                      sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
 +              res |= bnx2x_check_blocks_with_parity1(bp,
 +                      sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
 +              res |= bnx2x_check_blocks_with_parity2(bp,
 +                      sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
 +              res |= bnx2x_check_blocks_with_parity3(bp,
 +                      sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
 +              res |= bnx2x_check_blocks_with_parity4(bp,
 +                      sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
  
                if (print)
                        pr_cont("\n");
 +      }
  
 -              return true;
 -      } else
 -              return false;
 +      return res;
  }
  
  /**
@@@ -4729,14 -4703,6 +4729,14 @@@ bool bnx2x_chk_parity_attn(struct bnx2
        attn.sig[3] = REG_RD(bp,
                MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
                             port*4);
 +      /* Since MCP attentions can't be disabled inside the block, we need to
 +       * read AEU registers to see whether they're currently disabled
 +       */
 +      attn.sig[3] &= ((REG_RD(bp,
 +                              !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
 +                                    : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
 +                       MISC_AEU_ENABLE_MCP_PRTY_BITS) |
 +                      ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
  
        if (!CHIP_IS_E1x(bp))
                attn.sig[4] = REG_RD(bp,
@@@ -5481,24 -5447,26 +5481,24 @@@ static void bnx2x_timer(unsigned long d
        if (IS_PF(bp) &&
            !BP_NOMCP(bp)) {
                int mb_idx = BP_FW_MB_IDX(bp);
 -              u32 drv_pulse;
 -              u32 mcp_pulse;
 +              u16 drv_pulse;
 +              u16 mcp_pulse;
  
                ++bp->fw_drv_pulse_wr_seq;
                bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
 -              /* TBD - add SYSTEM_TIME */
                drv_pulse = bp->fw_drv_pulse_wr_seq;
                bnx2x_drv_pulse(bp);
  
                mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
                             MCP_PULSE_SEQ_MASK);
                /* The delta between driver pulse and mcp response
 -               * should be 1 (before mcp response) or 0 (after mcp response)
 +               * should not get too big. If the MFW is more than 5 pulses
 +               * behind, we should worry about it enough to generate an error
 +               * log.
                 */
 -              if ((drv_pulse != mcp_pulse) &&
 -                  (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
 -                      /* someone lost a heartbeat... */
 -                      BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
 +              if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
 +                      BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
                                  drv_pulse, mcp_pulse);
 -              }
        }
  
        if (bp->state == BNX2X_STATE_OPEN)
@@@ -7152,7 -7120,7 +7152,7 @@@ static int bnx2x_init_hw_port(struct bn
        int port = BP_PORT(bp);
        int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
        u32 low, high;
 -      u32 val;
 +      u32 val, reg;
  
        DP(NETIF_MSG_HW, "starting port init  port %d\n", port);
  
        val |= CHIP_IS_E1(bp) ? 0 : 0x10;
        REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
  
 +      /* SCPAD_PARITY should NOT trigger close the gates */
 +      reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
 +      REG_WR(bp, reg,
 +             REG_RD(bp, reg) &
 +             ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
 +
 +      reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
 +      REG_WR(bp, reg,
 +             REG_RD(bp, reg) &
 +             ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
 +
        bnx2x_init_block(bp, BLOCK_NIG, init_phase);
  
        if (!CHIP_IS_E1x(bp)) {
@@@ -8695,7 -8652,6 +8695,7 @@@ u32 bnx2x_send_unload_req(struct bnx2x 
        else if (bp->wol) {
                u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
                u8 *mac_addr = bp->dev->dev_addr;
 +              struct pci_dev *pdev = bp->pdev;
                u32 val;
                u16 pmc;
  
                EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
  
                /* Enable the PME and clear the status */
 -              pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
 +              pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
                pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
 -              pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
 +              pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
  
                reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
  
@@@ -9916,7 -9872,7 +9916,7 @@@ static int bnx2x_prev_path_mark_eeh(str
  static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
  {
        struct bnx2x_prev_path_list *tmp_list;
 -      int rc = false;
 +      bool rc = false;
  
        if (down_trylock(&bnx2x_prev_sem))
                return false;
@@@ -10443,7 -10399,7 +10443,7 @@@ static void bnx2x_get_common_hwinfo(str
                break;
        }
  
 -      pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
 +      pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
        bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
  
        BNX2X_DEV_INFO("%sWoL capable\n",
@@@ -11186,14 -11142,6 +11186,14 @@@ static void bnx2x_get_mac_hwinfo(struc
                        bnx2x_get_cnic_mac_hwinfo(bp);
        }
  
 +      if (!BP_NOMCP(bp)) {
 +              /* Read physical port identifier from shmem */
 +              val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
 +              val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
 +              bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
 +              bp->flags |= HAS_PHYS_PORT_ID;
 +      }
 +
        memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
  
        if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
@@@ -11730,6 -11678,9 +11730,6 @@@ static int bnx2x_init_bp(struct bnx2x *
  static int bnx2x_open(struct net_device *dev)
  {
        struct bnx2x *bp = netdev_priv(dev);
 -      bool global = false;
 -      int other_engine = BP_PATH(bp) ? 0 : 1;
 -      bool other_load_status, load_status;
        int rc;
  
        bp->stats_init = true;
         * Parity recovery is only relevant for PF driver.
         */
        if (IS_PF(bp)) {
 +              int other_engine = BP_PATH(bp) ? 0 : 1;
 +              bool other_load_status, load_status;
 +              bool global = false;
 +
                other_load_status = bnx2x_get_load_status(bp, other_engine);
                load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
                if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
        rc = bnx2x_nic_load(bp, LOAD_OPEN);
        if (rc)
                return rc;
 -      return bnx2x_open_epilog(bp);
 +      return 0;
  }
  
  /* called with rtnl_lock */
@@@ -12090,20 -12037,6 +12090,20 @@@ static int bnx2x_validate_addr(struct n
        return 0;
  }
  
 +static int bnx2x_get_phys_port_id(struct net_device *netdev,
 +                                struct netdev_phys_port_id *ppid)
 +{
 +      struct bnx2x *bp = netdev_priv(netdev);
 +
 +      if (!(bp->flags & HAS_PHYS_PORT_ID))
 +              return -EOPNOTSUPP;
 +
 +      ppid->id_len = sizeof(bp->phys_port_id);
 +      memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
 +
 +      return 0;
 +}
 +
  static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_open               = bnx2x_open,
        .ndo_stop               = bnx2x_close,
  #ifdef CONFIG_NET_RX_BUSY_POLL
        .ndo_busy_poll          = bnx2x_low_latency_recv,
  #endif
 +      .ndo_get_phys_port_id   = bnx2x_get_phys_port_id,
  };
  
  static int bnx2x_set_coherency_mask(struct bnx2x *bp)
  {
        struct device *dev = &bp->pdev->dev;
  
-       if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
-               if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
-                       dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
-                       return -EIO;
-               }
-       } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
 -      if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) == 0) {
 -              bp->flags |= USING_DAC_FLAG;
 -      } else if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
++      if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
++          dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
                dev_err(dev, "System does not support DMA, aborting\n");
                return -EIO;
        }
@@@ -12208,7 -12137,8 +12204,7 @@@ static int bnx2x_init_dev(struct bnx2x 
        }
  
        if (IS_PF(bp)) {
 -              bp->pm_cap = pdev->pm_cap;
 -              if (bp->pm_cap == 0) {
 +              if (!pdev->pm_cap) {
                        dev_err(&bp->pdev->dev,
                                "Cannot find power management capability, aborting\n");
                        rc = -EIO;
                NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
                NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
        if (!CHIP_IS_E1x(bp)) {
 -              dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
 +              dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
 +                                  NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
                dev->hw_enc_features =
                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
                        NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
 +                      NETIF_F_GSO_IPIP |
 +                      NETIF_F_GSO_SIT |
                        NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
        }
  
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
  
        dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
 -      if (bp->flags & USING_DAC_FLAG)
 -              dev->features |= NETIF_F_HIGHDMA;
 +      dev->features |= NETIF_F_HIGHDMA;
  
        /* Add Loopback capability to the device */
        dev->hw_features |= NETIF_F_LOOPBACK;
@@@ -12336,11 -12264,34 +12332,11 @@@ err_out_release
  
  err_out_disable:
        pci_disable_device(pdev);
 -      pci_set_drvdata(pdev, NULL);
  
  err_out:
        return rc;
  }
  
 -static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width,
 -                                     enum bnx2x_pci_bus_speed *speed)
 -{
 -      u32 link_speed, val = 0;
 -
 -      pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
 -      *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
 -
 -      link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
 -
 -      switch (link_speed) {
 -      case 3:
 -              *speed = BNX2X_PCI_LINK_SPEED_8000;
 -              break;
 -      case 2:
 -              *speed = BNX2X_PCI_LINK_SPEED_5000;
 -              break;
 -      default:
 -              *speed = BNX2X_PCI_LINK_SPEED_2500;
 -      }
 -}
 -
  static int bnx2x_check_firmware(struct bnx2x *bp)
  {
        const struct firmware *firmware = bp->firmware;
@@@ -12651,24 -12602,24 +12647,24 @@@ static int set_max_cos_est(int chip_id
                return BNX2X_MULTI_TX_COS_E1X;
        case BCM57712:
        case BCM57712_MF:
 -      case BCM57712_VF:
                return BNX2X_MULTI_TX_COS_E2_E3A0;
        case BCM57800:
        case BCM57800_MF:
 -      case BCM57800_VF:
        case BCM57810:
        case BCM57810_MF:
        case BCM57840_4_10:
        case BCM57840_2_20:
        case BCM57840_O:
        case BCM57840_MFO:
 -      case BCM57810_VF:
        case BCM57840_MF:
 -      case BCM57840_VF:
        case BCM57811:
        case BCM57811_MF:
 -      case BCM57811_VF:
                return BNX2X_MULTI_TX_COS_E3B0;
 +      case BCM57712_VF:
 +      case BCM57800_VF:
 +      case BCM57810_VF:
 +      case BCM57840_VF:
 +      case BCM57811_VF:
                return 1;
        default:
                pr_err("Unknown board_type (%d), aborting\n", chip_id);
@@@ -12697,8 -12648,8 +12693,8 @@@ static int bnx2x_init_one(struct pci_de
  {
        struct net_device *dev = NULL;
        struct bnx2x *bp;
 -      int pcie_width;
 -      enum bnx2x_pci_bus_speed pcie_speed;
 +      enum pcie_link_width pcie_width;
 +      enum pci_bus_speed pcie_speed;
        int rc, max_non_def_sbs;
        int rx_count, tx_count, rss_count, doorbell_size;
        int max_cos_est;
                dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
                rtnl_unlock();
        }
 -
 -      bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
 -      BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
 -                     pcie_width, pcie_speed);
 -
 -      BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
 +      if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
 +          pcie_speed == PCI_SPEED_UNKNOWN ||
 +          pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
 +              BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
 +      else
 +              BNX2X_DEV_INFO(
 +                     "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
                       board_info[ent->driver_data].name,
                       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
                       pcie_width,
 -                     pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" :
 -                     pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" :
 -                     pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" :
 +                     pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
 +                     pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
 +                     pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
                       "Unknown",
                       dev->base_addr, bp->pdev->irq, dev->dev_addr);
  
@@@ -12878,6 -12828,7 +12874,6 @@@ init_one_exit
                pci_release_regions(pdev);
  
        pci_disable_device(pdev);
 -      pci_set_drvdata(pdev, NULL);
  
        return rc;
  }
@@@ -12960,6 -12911,7 +12956,6 @@@ static void __bnx2x_remove(struct pci_d
                pci_release_regions(pdev);
  
        pci_disable_device(pdev);
 -      pci_set_drvdata(pdev, NULL);
  }
  
  static void bnx2x_remove_one(struct pci_dev *pdev)
@@@ -13676,10 -13628,6 +13672,10 @@@ void bnx2x_setup_cnic_info(struct bnx2
        cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
        cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
  
 +      DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
 +         BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
 +         cp->iscsi_l2_cid);
 +
        if (NO_ISCSI_OOO(bp))
                cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
  }
@@@ -3212,6 -3212,7 +3212,6 @@@ bnad_init(struct bnad *bnad
        bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
        if (!bnad->bar0) {
                dev_err(&pdev->dev, "ioremap for bar0 failed\n");
 -              pci_set_drvdata(pdev, NULL);
                return -ENOMEM;
        }
        pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
@@@ -3299,17 -3300,12 +3299,12 @@@ bnad_pci_init(struct bnad *bnad
        err = pci_request_regions(pdev, BNAD_NAME);
        if (err)
                goto disable_device;
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-           !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                *using_dac = true;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
-               if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err)
-                               goto release_regions;
-               }
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+               if (err)
+                       goto release_regions;
                *using_dac = false;
        }
        pci_set_master(pdev);
@@@ -22,7 -22,6 +22,7 @@@
  #include <asm/div64.h>
  #include <linux/aer.h>
  #include <linux/if_bridge.h>
 +#include <net/busy_poll.h>
  
  MODULE_VERSION(DRV_VER);
  MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@@ -307,13 -306,9 +307,13 @@@ static void *hw_stats_from_cmd(struct b
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
  
                return &cmd->hw_stats;
 -      } else  {
 +      } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
  
 +              return &cmd->hw_stats;
 +      } else {
 +              struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
 +
                return &cmd->hw_stats;
        }
  }
@@@ -325,13 -320,9 +325,13 @@@ static void *be_erx_stats_from_cmd(stru
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
  
                return &hw_stats->erx;
 -      } else {
 +      } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
  
 +              return &hw_stats->erx;
 +      } else {
 +              struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
 +
                return &hw_stats->erx;
        }
  }
@@@ -431,60 -422,6 +431,60 @@@ static void populate_be_v1_stats(struc
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
  }
  
 +static void populate_be_v2_stats(struct be_adapter *adapter)
 +{
 +      struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
 +      struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
 +      struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
 +      struct be_port_rxf_stats_v2 *port_stats =
 +                                      &rxf_stats->port[adapter->port_num];
 +      struct be_drv_stats *drvs = &adapter->drv_stats;
 +
 +      be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
 +      drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
 +      drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
 +      drvs->rx_pause_frames = port_stats->rx_pause_frames;
 +      drvs->rx_crc_errors = port_stats->rx_crc_errors;
 +      drvs->rx_control_frames = port_stats->rx_control_frames;
 +      drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
 +      drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
 +      drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
 +      drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
 +      drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
 +      drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
 +      drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
 +      drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
 +      drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
 +      drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
 +      drvs->rx_dropped_header_too_small =
 +              port_stats->rx_dropped_header_too_small;
 +      drvs->rx_input_fifo_overflow_drop =
 +              port_stats->rx_input_fifo_overflow_drop;
 +      drvs->rx_address_filtered = port_stats->rx_address_filtered;
 +      drvs->rx_alignment_symbol_errors =
 +              port_stats->rx_alignment_symbol_errors;
 +      drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
 +      drvs->tx_pauseframes = port_stats->tx_pauseframes;
 +      drvs->tx_controlframes = port_stats->tx_controlframes;
 +      drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
 +      drvs->jabber_events = port_stats->jabber_events;
 +      drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
 +      drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
 +      drvs->forwarded_packets = rxf_stats->forwarded_packets;
 +      drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
 +      drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
 +      drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
 +      adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
 +      if (be_roce_supported(adapter))  {
 +              drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
 +              drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
 +              drvs->rx_roce_frames = port_stats->roce_frames_received;
 +              drvs->roce_drops_crc = port_stats->roce_drops_crc;
 +              drvs->roce_drops_payload_len =
 +                      port_stats->roce_drops_payload_len;
 +      }
 +}
 +
  static void populate_lancer_stats(struct be_adapter *adapter)
  {
  
@@@ -552,7 -489,7 +552,7 @@@ static void populate_erx_stats(struct b
  
  void be_parse_stats(struct be_adapter *adapter)
  {
 -      struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
 +      struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
 -              else
 -                      /* for BE3 and Skyhawk */
 +              else if (BE3_chip(adapter))
 +                      /* for BE3 */
                        populate_be_v1_stats(adapter);
 +              else
 +                      populate_be_v2_stats(adapter);
  
 -              /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
 +              /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
@@@ -920,11 -855,11 +920,11 @@@ static struct sk_buff *be_xmit_workarou
        unsigned int eth_hdr_len;
        struct iphdr *ip;
  
 -      /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
 +      /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
         * may cause a transmit stall on that port. So the work-around is to
 -       * pad such packets to a 36-byte length.
 +       * pad short packets (<= 32 bytes) to a 36-byte length.
         */
 -      if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
 +      if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto tx_drop;
                skb->len = 36;
@@@ -1000,10 -935,8 +1000,10 @@@ static netdev_tx_t be_xmit(struct sk_bu
        u32 start = txq->head;
  
        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
 -      if (!skb)
 +      if (!skb) {
 +              tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
 +      }
  
        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
  
                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
 +              tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
@@@ -1081,40 -1013,18 +1081,40 @@@ static int be_vid_config(struct be_adap
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);
  
 -      /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
 -              dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
 -              dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
 -              goto set_vlan_promisc;
 +              /* Set to VLAN promisc mode as setting VLAN filter failed */
 +              if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
 +                      goto set_vlan_promisc;
 +              dev_err(&adapter->pdev->dev,
 +                      "Setting HW VLAN filtering failed.\n");
 +      } else {
 +              if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
 +                      /* hw VLAN filtering re-enabled. */
 +                      status = be_cmd_rx_filter(adapter,
 +                                                BE_FLAGS_VLAN_PROMISC, OFF);
 +                      if (!status) {
 +                              dev_info(&adapter->pdev->dev,
 +                                       "Disabling VLAN Promiscuous mode.\n");
 +                              adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
 +                              dev_info(&adapter->pdev->dev,
 +                                       "Re-Enabling HW VLAN filtering\n");
 +                      }
 +              }
        }
  
        return status;
  
  set_vlan_promisc:
 -      status = be_cmd_vlan_config(adapter, adapter->if_handle,
 -                                  NULL, 0, 1, 1);
 +      dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
 +
 +      status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
 +      if (!status) {
 +              dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
 +              dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
 +              adapter->flags |= BE_FLAGS_VLAN_PROMISC;
 +      } else
 +              dev_err(&adapter->pdev->dev,
 +                      "Failed to enable VLAN Promiscuous mode.\n");
        return status;
  }
  
@@@ -1123,6 -1033,10 +1123,6 @@@ static int be_vlan_add_vid(struct net_d
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
  
 -      if (!lancer_chip(adapter) && !be_physfn(adapter)) {
 -              status = -EINVAL;
 -              goto ret;
 -      }
  
        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
@@@ -1145,6 -1059,11 +1145,6 @@@ static int be_vlan_rem_vid(struct net_d
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
  
 -      if (!lancer_chip(adapter) && !be_physfn(adapter)) {
 -              status = -EINVAL;
 -              goto ret;
 -      }
 -
        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;
@@@ -1269,8 -1188,8 +1269,8 @@@ static int be_get_vf_config(struct net_
  
        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
 -      vi->vlan = vf_cfg->vlan_tag;
 -      vi->qos = 0;
 +      vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
 +      vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
  
        return 0;
@@@ -1280,29 -1199,28 +1280,29 @@@ static int be_set_vf_vlan(struct net_de
                        int vf, u16 vlan, u8 qos)
  {
        struct be_adapter *adapter = netdev_priv(netdev);
 +      struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status = 0;
  
        if (!sriov_enabled(adapter))
                return -EPERM;
  
 -      if (vf >= adapter->num_vfs || vlan > 4095)
 +      if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;
  
 -      if (vlan) {
 -              if (adapter->vf_cfg[vf].vlan_tag != vlan) {
 +      if (vlan || qos) {
 +              vlan |= qos << VLAN_PRIO_SHIFT;
 +              if (vf_cfg->vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
 -                      adapter->vf_cfg[vf].vlan_tag = vlan;
 -
 -                      status = be_cmd_set_hsw_config(adapter, vlan,
 -                              vf + 1, adapter->vf_cfg[vf].if_handle, 0);
 +                      vf_cfg->vlan_tag = vlan;
 +                      status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
 +                                                     vf_cfg->if_handle, 0);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
 -              adapter->vf_cfg[vf].vlan_tag = 0;
 -              vlan = adapter->vf_cfg[vf].def_vid;
 +              vf_cfg->vlan_tag = 0;
 +              vlan = vf_cfg->def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
 -                      adapter->vf_cfg[vf].if_handle, 0);
 +                                             vf_cfg->if_handle, 0);
        }
  
  
@@@ -1343,79 -1261,53 +1343,79 @@@ static int be_set_vf_tx_rate(struct net
        return status;
  }
  
 -static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
 +static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
 +                        ulong now)
  {
 -      struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
 -      ulong now = jiffies;
 -      ulong delta = now - stats->rx_jiffies;
 -      u64 pkts;
 -      unsigned int start, eqd;
 +      aic->rx_pkts_prev = rx_pkts;
 +      aic->tx_reqs_prev = tx_pkts;
 +      aic->jiffies = now;
 +}
  
 -      if (!eqo->enable_aic) {
 -              eqd = eqo->eqd;
 -              goto modify_eqd;
 -      }
 +static void be_eqd_update(struct be_adapter *adapter)
 +{
 +      struct be_set_eqd set_eqd[MAX_EVT_QS];
 +      int eqd, i, num = 0, start;
 +      struct be_aic_obj *aic;
 +      struct be_eq_obj *eqo;
 +      struct be_rx_obj *rxo;
 +      struct be_tx_obj *txo;
 +      u64 rx_pkts, tx_pkts;
 +      ulong now;
 +      u32 pps, delta;
  
 -      if (eqo->idx >= adapter->num_rx_qs)
 -              return;
 +      for_all_evt_queues(adapter, eqo, i) {
 +              aic = &adapter->aic_obj[eqo->idx];
 +              if (!aic->enable) {
 +                      if (aic->jiffies)
 +                              aic->jiffies = 0;
 +                      eqd = aic->et_eqd;
 +                      goto modify_eqd;
 +              }
  
 -      stats = rx_stats(&adapter->rx_obj[eqo->idx]);
 +              rxo = &adapter->rx_obj[eqo->idx];
 +              do {
 +                      start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
 +                      rx_pkts = rxo->stats.rx_pkts;
 +              } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
  
 -      /* Wrapped around */
 -      if (time_before(now, stats->rx_jiffies)) {
 -              stats->rx_jiffies = now;
 -              return;
 -      }
 +              txo = &adapter->tx_obj[eqo->idx];
 +              do {
 +                      start = u64_stats_fetch_begin_bh(&txo->stats.sync);
 +                      tx_pkts = txo->stats.tx_reqs;
 +              } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
  
 -      /* Update once a second */
 -      if (delta < HZ)
 -              return;
  
 -      do {
 -              start = u64_stats_fetch_begin_bh(&stats->sync);
 -              pkts = stats->rx_pkts;
 -      } while (u64_stats_fetch_retry_bh(&stats->sync, start));
 -
 -      stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
 -      stats->rx_pkts_prev = pkts;
 -      stats->rx_jiffies = now;
 -      eqd = (stats->rx_pps / 110000) << 3;
 -      eqd = min(eqd, eqo->max_eqd);
 -      eqd = max(eqd, eqo->min_eqd);
 -      if (eqd < 10)
 -              eqd = 0;
 +              /* Skip, if wrapped around or first calculation */
 +              now = jiffies;
 +              if (!aic->jiffies || time_before(now, aic->jiffies) ||
 +                  rx_pkts < aic->rx_pkts_prev ||
 +                  tx_pkts < aic->tx_reqs_prev) {
 +                      be_aic_update(aic, rx_pkts, tx_pkts, now);
 +                      continue;
 +              }
 +
 +              delta = jiffies_to_msecs(now - aic->jiffies);
 +              pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
 +                      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
 +              eqd = (pps / 15000) << 2;
  
 +              if (eqd < 8)
 +                      eqd = 0;
 +              eqd = min_t(u32, eqd, aic->max_eqd);
 +              eqd = max_t(u32, eqd, aic->min_eqd);
 +
 +              be_aic_update(aic, rx_pkts, tx_pkts, now);
  modify_eqd:
 -      if (eqd != eqo->cur_eqd) {
 -              be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
 -              eqo->cur_eqd = eqd;
 +              if (eqd != aic->prev_eqd) {
 +                      set_eqd[num].delay_multiplier = (eqd * 65)/100;
 +                      set_eqd[num].eq_id = eqo->q.id;
 +                      aic->prev_eqd = eqd;
 +                      num++;
 +              }
        }
 +
 +      if (num)
 +              be_cmd_modify_eqd(adapter, set_eqd, num);
  }
  
  static void be_rx_stats_update(struct be_rx_obj *rxo,
@@@ -1557,7 -1449,7 +1557,7 @@@ static void skb_fill_rx_data(struct be_
  }
  
  /* Process the RX completion indicated by rxcp when GRO is disabled */
 -static void be_rx_compl_process(struct be_rx_obj *rxo,
 +static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
                                struct be_rx_compl_info *rxcp)
  {
        struct be_adapter *adapter = rxo->adapter;
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;
 -
 +      skb_mark_napi_id(skb, napi);
  
        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
@@@ -1640,7 -1532,6 +1640,7 @@@ static void be_rx_compl_process_gro(str
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;
 +      skb_mark_napi_id(skb, napi);
  
        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
@@@ -1821,8 -1712,6 +1821,8 @@@ static void be_post_rx_frags(struct be_
  
        if (posted) {
                atomic_add(posted, &rxq->used);
 +              if (rxo->rx_post_starved)
 +                      rxo->rx_post_starved = false;
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
@@@ -2025,7 -1914,6 +2025,7 @@@ static void be_evt_queues_destroy(struc
                if (eqo->q.created) {
                        be_eq_clean(eqo);
                        be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
 +                      napi_hash_del(&eqo->napi);
                        netif_napi_del(&eqo->napi);
                }
                be_queue_free(adapter, &eqo->q);
@@@ -2036,7 -1924,6 +2036,7 @@@ static int be_evt_queues_create(struct 
  {
        struct be_queue_info *eq;
        struct be_eq_obj *eqo;
 +      struct be_aic_obj *aic;
        int i, rc;
  
        adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
        for_all_evt_queues(adapter, eqo, i) {
                netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
                               BE_NAPI_WEIGHT);
 +              napi_hash_add(&eqo->napi);
 +              aic = &adapter->aic_obj[i];
                eqo->adapter = adapter;
                eqo->tx_budget = BE_TX_BUDGET;
                eqo->idx = i;
 -              eqo->max_eqd = BE_MAX_EQD;
 -              eqo->enable_aic = true;
 +              aic->max_eqd = BE_MAX_EQD;
 +              aic->enable = true;
  
                eq = &eqo->q;
                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
@@@ -2268,7 -2153,7 +2268,7 @@@ static inline bool do_gro(struct be_rx_
  }
  
  static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
 -                      int budget)
 +                      int budget, int polling)
  {
        struct be_adapter *adapter = rxo->adapter;
        struct be_queue_info *rx_cq = &rxo->cq;
                        goto loop_continue;
                }
  
 -              if (do_gro(rxcp))
 +              /* Don't do gro when we're busy_polling */
 +              if (do_gro(rxcp) && polling != BUSY_POLLING)
                        be_rx_compl_process_gro(rxo, napi, rxcp);
                else
 -                      be_rx_compl_process(rxo, rxcp);
 +                      be_rx_compl_process(rxo, napi, rxcp);
 +
  loop_continue:
                be_rx_stats_update(rxo, rxcp);
        }
        if (work_done) {
                be_cq_notify(adapter, rx_cq->id, true, work_done);
  
 -              if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
 +              /* When an rx-obj gets into post_starved state, just
 +               * let be_worker do the posting.
 +               */
 +              if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
 +                  !rxo->rx_post_starved)
                        be_post_rx_frags(rxo, GFP_ATOMIC);
        }
  
@@@ -2361,7 -2240,6 +2361,7 @@@ int be_poll(struct napi_struct *napi, i
        struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter = eqo->adapter;
        int max_work = 0, work, i, num_evts;
 +      struct be_rx_obj *rxo;
        bool tx_done;
  
        num_evts = events_get(eqo);
                        max_work = budget;
        }
  
 -      /* This loop will iterate twice for EQ0 in which
 -       * completions of the last RXQ (default one) are also processed
 -       * For other EQs the loop iterates only once
 -       */
 -      for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
 -              work = be_process_rx(&adapter->rx_obj[i], napi, budget);
 -              max_work = max(work, max_work);
 +      if (be_lock_napi(eqo)) {
 +              /* This loop will iterate twice for EQ0 in which
 +               * completions of the last RXQ (default one) are also processed
 +               * For other EQs the loop iterates only once
 +               */
 +              for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
 +                      work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
 +                      max_work = max(work, max_work);
 +              }
 +              be_unlock_napi(eqo);
 +      } else {
 +              max_work = budget;
        }
  
        if (is_mcc_eqo(eqo))
        return max_work;
  }
  
 +#ifdef CONFIG_NET_RX_BUSY_POLL
 +static int be_busy_poll(struct napi_struct *napi)
 +{
 +      struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
 +      struct be_adapter *adapter = eqo->adapter;
 +      struct be_rx_obj *rxo;
 +      int i, work = 0;
 +
 +      if (!be_lock_busy_poll(eqo))
 +              return LL_FLUSH_BUSY;
 +
 +      for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
 +              work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
 +              if (work)
 +                      break;
 +      }
 +
 +      be_unlock_busy_poll(eqo);
 +      return work;
 +}
 +#endif
 +
  void be_detect_error(struct be_adapter *adapter)
  {
        u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
@@@ -2654,11 -2505,9 +2654,11 @@@ static int be_close(struct net_device *
  
        be_roce_dev_close(adapter);
  
 -      if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
 -              for_all_evt_queues(adapter, eqo, i)
 +      for_all_evt_queues(adapter, eqo, i) {
 +              if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
                        napi_disable(&eqo->napi);
 +                      be_disable_busy_poll(eqo);
 +              }
                adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
        }
  
@@@ -2769,7 -2618,6 +2769,7 @@@ static int be_open(struct net_device *n
  
        for_all_evt_queues(adapter, eqo, i) {
                napi_enable(&eqo->napi);
 +              be_enable_busy_poll(eqo);
                be_eq_notify(adapter, eqo->q.id, true, false, 0);
        }
        adapter->flags |= BE_FLAGS_NAPI_ENABLED;
@@@ -2954,7 -2802,7 +2954,7 @@@ static int be_vfs_if_create(struct be_a
        struct be_resources res = {0};
        struct be_vf_cfg *vf_cfg;
        u32 cap_flags, en_flags, vf;
 -      int status;
 +      int status = 0;
  
        cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
                    BE_IF_FLAGS_MULTICAST;
@@@ -3075,8 -2923,7 +3075,8 @@@ static int be_vf_setup(struct be_adapte
                        goto err;
                vf_cfg->def_vid = def_vlan;
  
 -              be_cmd_enable_vf(adapter, vf + 1);
 +              if (!old_vfs)
 +                      be_cmd_enable_vf(adapter, vf + 1);
        }
  
        if (!old_vfs) {
@@@ -3101,12 -2948,12 +3101,12 @@@ static void BEx_get_resources(struct be
        struct pci_dev *pdev = adapter->pdev;
        bool use_sriov = false;
  
 -      if (BE3_chip(adapter) && be_physfn(adapter)) {
 +      if (BE3_chip(adapter) && sriov_want(adapter)) {
                int max_vfs;
  
                max_vfs = pci_sriov_get_totalvfs(pdev);
                res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
 -              use_sriov = res->max_vfs && num_vfs;
 +              use_sriov = res->max_vfs;
        }
  
        if (be_physfn(adapter))
  
        if (adapter->function_mode & FLEX10_MODE)
                res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
 +      else if (adapter->function_mode & UMC_ENABLED)
 +              res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
        else
                res->max_vlans = BE_NUM_VLANS_SUPPORTED;
        res->max_mcast_mac = BE_MAX_MC;
  
 +      /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
        if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
 -          !be_physfn(adapter))
 +          !be_physfn(adapter) || (adapter->port_num > 1))
                res->max_tx_qs = 1;
        else
                res->max_tx_qs = BE3_MAX_TX_QS;
@@@ -3166,6 -3010,14 +3166,6 @@@ static int be_get_resources(struct be_a
                adapter->res = res;
        }
  
 -      /* For BE3 only check if FW suggests a different max-txqs value */
 -      if (BE3_chip(adapter)) {
 -              status = be_cmd_get_profile_config(adapter, &res, 0);
 -              if (!status && res.max_tx_qs)
 -                      adapter->res.max_tx_qs =
 -                              min(adapter->res.max_tx_qs, res.max_tx_qs);
 -      }
 -
        /* For Lancer, SH etc read per-function resource limits from FW.
         * GET_FUNC_CONFIG returns per function guaranteed limits.
         * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
@@@ -3379,12 -3231,6 +3379,12 @@@ static int be_setup(struct be_adapter *
  
        be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
  
 +      if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
 +              dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
 +                      adapter->fw_ver);
 +              dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
 +      }
 +
        if (adapter->vlans_added)
                be_vid_config(adapter);
  
                be_cmd_set_flow_control(adapter, adapter->tx_fc,
                                        adapter->rx_fc);
  
 -      if (be_physfn(adapter) && num_vfs) {
 +      if (sriov_want(adapter)) {
                if (be_max_vfs(adapter))
                        be_vf_setup(adapter);
                else
@@@ -4038,9 -3884,6 +4038,9 @@@ static const struct net_device_ops be_n
  #endif
        .ndo_bridge_setlink     = be_ndo_bridge_setlink,
        .ndo_bridge_getlink     = be_ndo_bridge_getlink,
 +#ifdef CONFIG_NET_RX_BUSY_POLL
 +      .ndo_busy_poll          = be_busy_poll
 +#endif
  };
  
  static void be_netdev_init(struct net_device *netdev)
@@@ -4101,6 -3944,11 +4101,6 @@@ static int be_roce_map_pci_bars(struct 
  static int be_map_pci_bars(struct be_adapter *adapter)
  {
        u8 __iomem *addr;
 -      u32 sli_intf;
 -
 -      pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
 -      adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
 -                              SLI_INTF_IF_TYPE_SHIFT;
  
        if (BEx_chip(adapter) && be_physfn(adapter)) {
                adapter->csr = pci_iomap(adapter->pdev, 2, 0);
@@@ -4213,11 -4061,9 +4213,11 @@@ static int be_stats_init(struct be_adap
                cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
        else if (BE2_chip(adapter))
                cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
 -      else
 -              /* BE3 and Skyhawk */
 +      else if (BE3_chip(adapter))
                cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
 +      else
 +              /* ALL non-BE ASICs */
 +              cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
  
        cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
                                      GFP_KERNEL);
@@@ -4251,6 -4097,7 +4251,6 @@@ static void be_remove(struct pci_dev *p
  
        pci_disable_pcie_error_reporting(pdev);
  
 -      pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
  
@@@ -4399,6 -4246,7 +4399,6 @@@ static void be_worker(struct work_struc
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
        struct be_rx_obj *rxo;
 -      struct be_eq_obj *eqo;
        int i;
  
        /* when interrupts are not yet enabled, just reap any pending
                be_cmd_get_die_temperature(adapter);
  
        for_all_rx_queues(adapter, rxo, i) {
 -              if (rxo->rx_post_starved) {
 -                      rxo->rx_post_starved = false;
 +              /* Replenish RX-queues starved due to memory
 +               * allocation failures.
 +               */
 +              if (rxo->rx_post_starved)
                        be_post_rx_frags(rxo, GFP_KERNEL);
 -              }
        }
  
 -      for_all_evt_queues(adapter, eqo, i)
 -              be_eqd_update(adapter, eqo);
 +      be_eqd_update(adapter);
  
  reschedule:
        adapter->work_counter++;
@@@ -4487,30 -4335,20 +4487,22 @@@ static int be_probe(struct pci_dev *pde
        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);
  
-       status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+       status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
-               status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (status < 0) {
-                       dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
-                       goto free_netdev;
-               }
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
-               status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
-               if (!status)
-                       status = dma_set_coherent_mask(&pdev->dev,
-                                                      DMA_BIT_MASK(32));
+               status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }
  
 -      status = pci_enable_pcie_error_reporting(pdev);
 -      if (status)
 -              dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
 +      if (be_physfn(adapter)) {
 +              status = pci_enable_pcie_error_reporting(pdev);
 +              if (!status)
 +                      dev_info(&pdev->dev, "PCIe error reporting enabled\n");
 +      }
  
        status = be_ctrl_init(adapter);
        if (status)
@@@ -4581,6 -4419,7 +4573,6 @@@ ctrl_clean
        be_ctrl_cleanup(adapter);
  free_netdev:
        free_netdev(netdev);
 -      pci_set_drvdata(pdev, NULL);
  rel_reg:
        pci_release_regions(pdev);
  disable_dev:
@@@ -1018,19 -1018,14 +1018,14 @@@ static int e1000_probe(struct pci_dev *
         */
        pci_using_dac = 0;
        if ((hw->bus_type == e1000_bus_type_pcix) &&
-           !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-               /* according to DMA-API-HOWTO, coherent calls will always
-                * succeed if the set call did
-                */
-               dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+           !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        pr_err("No usable DMA config, aborting\n");
                        goto err_dma;
                }
-               dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        }
  
        netdev->netdev_ops = &e1000_netdev_ops;
@@@ -3917,7 -3912,8 +3912,7 @@@ static bool e1000_clean_tx_irq(struct e
                              "  next_to_watch        <%x>\n"
                              "  jiffies              <%lx>\n"
                              "  next_to_watch.status <%x>\n",
 -                              (unsigned long)((tx_ring - adapter->tx_ring) /
 -                                      sizeof(struct e1000_tx_ring)),
 +                              (unsigned long)(tx_ring - adapter->tx_ring),
                                readl(hw->hw_addr + tx_ring->tdh),
                                readl(hw->hw_addr + tx_ring->tdt),
                                tx_ring->next_to_use,
@@@ -4868,7 -4868,7 +4868,7 @@@ static void e1000_watchdog_task(struct 
                         */
                        if ((hw->phy.type == e1000_phy_igp_3 ||
                             hw->phy.type == e1000_phy_bm) &&
 -                          (hw->mac.autoneg == true) &&
 +                          hw->mac.autoneg &&
                            (adapter->link_speed == SPEED_10 ||
                             adapter->link_speed == SPEED_100) &&
                            (adapter->link_duplex == HALF_DUPLEX)) {
@@@ -6553,21 -6553,15 +6553,15 @@@ static int e1000_probe(struct pci_dev *
                return err;
  
        pci_using_dac = 0;
-       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
-               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (!err)
-                       pci_using_dac = 1;
+               pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               dev_err(&pdev->dev,
-                                       "No usable DMA configuration, aborting\n");
-                               goto err_dma;
-                       }
+                       dev_err(&pdev->dev,
+                               "No usable DMA configuration, aborting\n");
+                       goto err_dma;
                }
        }
  
@@@ -182,7 -182,6 +182,7 @@@ static void igb_check_vf_rate_limit(str
  
  #ifdef CONFIG_PCI_IOV
  static int igb_vf_configure(struct igb_adapter *adapter, int vf);
 +static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
  #endif
  
  #ifdef CONFIG_PM
@@@ -2035,21 -2034,15 +2035,15 @@@ static int igb_probe(struct pci_dev *pd
                return err;
  
        pci_using_dac = 0;
-       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
-               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (!err)
-                       pci_using_dac = 1;
+               pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               dev_err(&pdev->dev,
-                                       "No usable DMA configuration, aborting\n");
-                               goto err_dma;
-                       }
+                       dev_err(&pdev->dev,
+                               "No usable DMA configuration, aborting\n");
+                       goto err_dma;
                }
        }
  
@@@ -2430,7 -2423,7 +2424,7 @@@ err_dma
  }
  
  #ifdef CONFIG_PCI_IOV
 -static int  igb_disable_sriov(struct pci_dev *pdev)
 +static int igb_disable_sriov(struct pci_dev *pdev)
  {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct igb_adapter *adapter = netdev_priv(netdev);
@@@ -2471,19 -2464,27 +2465,19 @@@ static int igb_enable_sriov(struct pci_
        int err = 0;
        int i;
  
 -      if (!adapter->msix_entries) {
 +      if (!adapter->msix_entries || num_vfs > 7) {
                err = -EPERM;
                goto out;
        }
 -
        if (!num_vfs)
                goto out;
 -      else if (old_vfs && old_vfs == num_vfs)
 -              goto out;
 -      else if (old_vfs && old_vfs != num_vfs)
 -              err = igb_disable_sriov(pdev);
 -
 -      if (err)
 -              goto out;
 -
 -      if (num_vfs > 7) {
 -              err = -EPERM;
 -              goto out;
 -      }
  
 -      adapter->vfs_allocated_count = num_vfs;
 +      if (old_vfs) {
 +              dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
 +                       old_vfs, max_vfs);
 +              adapter->vfs_allocated_count = old_vfs;
 +      } else
 +              adapter->vfs_allocated_count = num_vfs;
  
        adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
                                sizeof(struct vf_data_storage), GFP_KERNEL);
                goto out;
        }
  
 -      err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
 -      if (err)
 -              goto err_out;
 -
 +      /* only call pci_enable_sriov() if no VFs are allocated already */
 +      if (!old_vfs) {
 +              err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
 +              if (err)
 +                      goto err_out;
 +      }
        dev_info(&pdev->dev, "%d VFs allocated\n",
                 adapter->vfs_allocated_count);
        for (i = 0; i < adapter->vfs_allocated_count; i++)
@@@ -2618,7 -2617,7 +2612,7 @@@ static void igb_probe_vfs(struct igb_ad
                return;
  
        pci_sriov_set_totalvfs(pdev, 7);
 -      igb_enable_sriov(pdev, max_vfs);
 +      igb_pci_enable_sriov(pdev, max_vfs);
  
  #endif /* CONFIG_PCI_IOV */
  }
@@@ -5703,7 -5702,7 +5697,7 @@@ static void igb_vf_reset_msg(struct igb
  
        /* reply to reset with ack and vf mac address */
        msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
 -      memcpy(addr, vf_mac, 6);
 +      memcpy(addr, vf_mac, ETH_ALEN);
        igb_write_mbx(hw, msgbuf, 3, vf);
  }
  
@@@ -7833,26 -7832,4 +7827,26 @@@ s32 igb_write_i2c_byte(struct e1000_hw 
                return E1000_SUCCESS;
  
  }
 +
 +int igb_reinit_queues(struct igb_adapter *adapter)
 +{
 +      struct net_device *netdev = adapter->netdev;
 +      struct pci_dev *pdev = adapter->pdev;
 +      int err = 0;
 +
 +      if (netif_running(netdev))
 +              igb_close(netdev);
 +
 +      igb_clear_interrupt_scheme(adapter);
 +
 +      if (igb_init_interrupt_scheme(adapter, true)) {
 +              dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 +              return -ENOMEM;
 +      }
 +
 +      if (netif_running(netdev))
 +              err = igb_open(netdev);
 +
 +      return err;
 +}
  /* igb_main.c */
@@@ -2343,9 -2343,10 +2343,9 @@@ static int igbvf_change_mtu(struct net_
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
  
 -      if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
 -              dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
 +      if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN ||
 +          max_frame > MAX_JUMBO_FRAME_SIZE)
                return -EINVAL;
 -      }
  
  #define MAX_STD_JUMBO_FRAME_SIZE 9234
        if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
@@@ -2637,21 -2638,15 +2637,15 @@@ static int igbvf_probe(struct pci_dev *
                return err;
  
        pci_using_dac = 0;
-       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
-               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (!err)
-                       pci_using_dac = 1;
+               pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               dev_err(&pdev->dev, "No usable DMA "
-                                       "configuration, aborting\n");
-                               goto err_dma;
-                       }
+                       dev_err(&pdev->dev, "No usable DMA "
+                               "configuration, aborting\n");
+                       goto err_dma;
                }
        }
  
        if (ei->get_variants) {
                err = ei->get_variants(adapter);
                if (err)
 -                      goto err_ioremap;
 +                      goto err_get_variants;
        }
  
        /* setup adapter struct */
@@@ -2795,7 -2790,6 +2789,7 @@@ err_hw_init
        kfree(adapter->rx_ring);
  err_sw_init:
        igbvf_reset_interrupt_capability(adapter);
 +err_get_variants:
        iounmap(adapter->hw.hw_addr);
  err_ioremap:
        free_netdev(netdev);
@@@ -44,7 -44,6 +44,7 @@@
  #include <linux/ethtool.h>
  #include <linux/if.h>
  #include <linux/if_vlan.h>
 +#include <linux/if_macvlan.h>
  #include <linux/if_bridge.h>
  #include <linux/prefetch.h>
  #include <scsi/fc/fc_fcoe.h>
@@@ -133,7 -132,7 +133,7 @@@ static struct notifier_block dca_notifi
  static unsigned int max_vfs;
  module_param(max_vfs, uint, 0);
  MODULE_PARM_DESC(max_vfs,
 -               "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63");
 +               "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
  #endif /* CONFIG_PCI_IOV */
  
  static unsigned int allow_unsupported_sfp;
@@@ -154,6 -153,7 +154,6 @@@ MODULE_VERSION(DRV_VERSION)
  static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
                                          u32 reg, u16 *value)
  {
 -      int pos = 0;
        struct pci_dev *parent_dev;
        struct pci_bus *parent_bus;
  
        if (!parent_dev)
                return -1;
  
 -      pos = pci_find_capability(parent_dev, PCI_CAP_ID_EXP);
 -      if (!pos)
 +      if (!pci_is_pcie(parent_dev))
                return -1;
  
 -      pci_read_config_word(parent_dev, pos + reg, value);
 +      pcie_capability_read_word(parent_dev, reg, value);
        return 0;
  }
  
@@@ -246,7 -247,7 +246,7 @@@ static void ixgbe_check_minimum_link(st
                max_gts = 4 * width;
                break;
        case PCIE_SPEED_8_0GT:
 -              /* 128b/130b encoding only reduces throughput by 1% */
 +              /* 128b/130b encoding reduces throughput by less than 2% */
                max_gts = 8 * width;
                break;
        default:
                   width,
                   (speed == PCIE_SPEED_2_5GT ? "20%" :
                    speed == PCIE_SPEED_5_0GT ? "20%" :
 -                  speed == PCIE_SPEED_8_0GT ? "N/a" :
 +                  speed == PCIE_SPEED_8_0GT ? "<2%" :
                    "Unknown"));
  
        if (max_gts < expected_gts) {
@@@ -871,18 -872,11 +871,18 @@@ static u64 ixgbe_get_tx_completed(struc
  
  static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
  {
 -      struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
 -      struct ixgbe_hw *hw = &adapter->hw;
 +      struct ixgbe_adapter *adapter;
 +      struct ixgbe_hw *hw;
 +      u32 head, tail;
  
 -      u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
 -      u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
 +      if (ring->l2_accel_priv)
 +              adapter = ring->l2_accel_priv->real_adapter;
 +      else
 +              adapter = netdev_priv(ring->netdev);
 +
 +      hw = &adapter->hw;
 +      head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
 +      tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
  
        if (head != tail)
                return (head < tail) ?
@@@ -1591,7 -1585,7 +1591,7 @@@ static void ixgbe_rx_skb(struct ixgbe_q
  {
        struct ixgbe_adapter *adapter = q_vector->adapter;
  
 -      if (ixgbe_qv_ll_polling(q_vector))
 +      if (ixgbe_qv_busy_polling(q_vector))
                netif_receive_skb(skb);
        else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
                napi_gro_receive(&q_vector->napi, skb);
@@@ -2103,7 -2097,7 +2103,7 @@@ static int ixgbe_low_latency_recv(struc
  
        ixgbe_for_each_ring(ring, q_vector->rx) {
                found = ixgbe_clean_rx_irq(q_vector, ring, 4);
 -#ifdef LL_EXTENDED_STATS
 +#ifdef BP_EXTENDED_STATS
                if (found)
                        ring->stats.cleaned += found;
                else
@@@ -3011,7 -3005,7 +3011,7 @@@ void ixgbe_configure_tx_ring(struct ixg
                struct ixgbe_q_vector *q_vector = ring->q_vector;
  
                if (q_vector)
 -                      netif_set_xps_queue(adapter->netdev,
 +                      netif_set_xps_queue(ring->netdev,
                                            &q_vector->affinity_mask,
                                            ring->queue_index);
        }
@@@ -3401,7 -3395,7 +3401,7 @@@ static void ixgbe_setup_psrtype(struct 
  {
        struct ixgbe_hw *hw = &adapter->hw;
        int rss_i = adapter->ring_feature[RING_F_RSS].indices;
 -      int p;
 +      u16 pool;
  
        /* PSRTYPE must be initialized in non 82598 adapters */
        u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
        else if (rss_i > 1)
                psrtype |= 1 << 29;
  
 -      for (p = 0; p < adapter->num_rx_pools; p++)
 -              IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),
 -                              psrtype);
 +      for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
 +              IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
  }
  
  static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
@@@ -3576,7 -3571,7 +3576,7 @@@ static void ixgbe_configure_rx(struct i
  {
        struct ixgbe_hw *hw = &adapter->hw;
        int i;
 -      u32 rxctrl;
 +      u32 rxctrl, rfctl;
  
        /* disable receives while setting up the descriptors */
        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
        ixgbe_setup_psrtype(adapter);
        ixgbe_setup_rdrxctl(adapter);
  
 +      /* RSC Setup */
 +      rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
 +      rfctl &= ~IXGBE_RFCTL_RSC_DIS;
 +      if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
 +              rfctl |= IXGBE_RFCTL_RSC_DIS;
 +      IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
 +
        /* Program registers for the distribution of queues */
        ixgbe_setup_mrqc(adapter);
  
@@@ -3688,11 -3676,7 +3688,11 @@@ static void ixgbe_vlan_strip_disable(st
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                for (i = 0; i < adapter->num_rx_queues; i++) {
 -                      j = adapter->rx_ring[i]->reg_idx;
 +                      struct ixgbe_ring *ring = adapter->rx_ring[i];
 +
 +                      if (ring->l2_accel_priv)
 +                              continue;
 +                      j = ring->reg_idx;
                        vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
                        vlnctrl &= ~IXGBE_RXDCTL_VME;
                        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@@ -3722,11 -3706,7 +3722,11 @@@ static void ixgbe_vlan_strip_enable(str
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                for (i = 0; i < adapter->num_rx_queues; i++) {
 -                      j = adapter->rx_ring[i]->reg_idx;
 +                      struct ixgbe_ring *ring = adapter->rx_ring[i];
 +
 +                      if (ring->l2_accel_priv)
 +                              continue;
 +                      j = ring->reg_idx;
                        vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
                        vlnctrl |= IXGBE_RXDCTL_VME;
                        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@@ -3763,7 -3743,7 +3763,7 @@@ static int ixgbe_write_uc_addr_list(str
        unsigned int rar_entries = hw->mac.num_rar_entries - 1;
        int count = 0;
  
 -      /* In SR-IOV mode significantly less RAR entries are available */
 +      /* In SR-IOV/VMDQ modes significantly less RAR entries are available */
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
                rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
  
@@@ -3838,6 -3818,14 +3838,6 @@@ void ixgbe_set_rx_mode(struct net_devic
                if (netdev->flags & IFF_ALLMULTI) {
                        fctrl |= IXGBE_FCTRL_MPE;
                        vmolr |= IXGBE_VMOLR_MPE;
 -              } else {
 -                      /*
 -                       * Write addresses to the MTA, if the attempt fails
 -                       * then we should just turn on promiscuous mode so
 -                       * that we can at least receive multicast traffic
 -                       */
 -                      hw->mac.ops.update_mc_addr_list(hw, netdev);
 -                      vmolr |= IXGBE_VMOLR_ROMPE;
                }
                ixgbe_vlan_filter_enable(adapter);
                hw->addr_ctrl.user_set_promisc = false;
                vmolr |= IXGBE_VMOLR_ROPE;
        }
  
 +      /* Write addresses to the MTA, if the attempt fails
 +       * then we should just turn on promiscuous mode so
 +       * that we can at least receive multicast traffic
 +       */
 +      hw->mac.ops.update_mc_addr_list(hw, netdev);
 +      vmolr |= IXGBE_VMOLR_ROMPE;
 +
        if (adapter->num_vfs)
                ixgbe_restore_vf_multicasts(adapter);
  
@@@ -3905,13 -3886,15 +3905,13 @@@ static void ixgbe_napi_disable_all(stru
  {
        int q_idx;
  
 -      local_bh_disable(); /* for ixgbe_qv_lock_napi() */
        for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
                napi_disable(&adapter->q_vector[q_idx]->napi);
 -              while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) {
 +              while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
                        pr_info("QV %d locked\n", q_idx);
 -                      mdelay(1);
 +                      usleep_range(1000, 20000);
                }
        }
 -      local_bh_enable();
  }
  
  #ifdef CONFIG_IXGBE_DCB
@@@ -4128,228 -4111,6 +4128,228 @@@ static void ixgbe_fdir_filter_restore(s
        spin_unlock(&adapter->fdir_perfect_lock);
  }
  
 +/* ixgbe_macvlan_set_rx_mode - program rx filtering for a macvlan offload pool
 + * @dev: macvlan net_device whose address lists are honored
 + * @pool: VMDQ pool index backing @dev
 + * @adapter: board private structure
 + *
 + * Always accepts broadcast and untagged frames (BAM/AUPE); enables
 + * multicast promiscuous (MPE) only under IFF_ALLMULTI, otherwise loads
 + * the MTA from @dev's multicast list.  Unicast promiscuous is never
 + * enabled for VMDQ pools.
 + */
 +static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
 +                                    struct ixgbe_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u32 vmolr;
 +
 +      /* No unicast promiscuous support for VMDQ devices. */
 +      vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
 +      vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
 +
 +      /* clear the affected bit */
 +      vmolr &= ~IXGBE_VMOLR_MPE;
 +
 +      if (dev->flags & IFF_ALLMULTI) {
 +              vmolr |= IXGBE_VMOLR_MPE;
 +      } else {
 +              /* NOTE(review): ROMPE was already OR'd in above, so this
 +               * line is redundant but harmless; kept for symmetry.
 +               */
 +              vmolr |= IXGBE_VMOLR_ROMPE;
 +              hw->mac.ops.update_mc_addr_list(hw, dev);
 +      }
 +      /* re-sync PF unicast filters — presumably because RAR entries are
 +       * shared between the PF and the pools; confirm against RAR layout
 +       */
 +      ixgbe_write_uc_addr_list(adapter->netdev);
 +      IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
 +}
 +
 +static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
 +                               u8 *addr, u16 pool)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      unsigned int entry;
 +
 +      entry = hw->mac.num_rar_entries - pool;
 +      hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
 +}
 +
 +/* ixgbe_fwd_psrtype - set packet-split receive type for a forwarding pool
 + * @vadapter: forwarding (macvlan offload) accelerator state
 + *
 + * No-op on 82598 (early return below).  The bits written into 30:29
 + * scale with the number of RX queues per pool — presumably the RSS
 + * field width for the pool; confirm against the datasheet.
 + */
 +static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
 +{
 +      struct ixgbe_adapter *adapter = vadapter->real_adapter;
 +      int rss_i = adapter->num_rx_queues_per_pool;
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      u16 pool = vadapter->pool;
 +      u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
 +                    IXGBE_PSRTYPE_UDPHDR |
 +                    IXGBE_PSRTYPE_IPV4HDR |
 +                    IXGBE_PSRTYPE_L2HDR |
 +                    IXGBE_PSRTYPE_IPV6HDR;
 +
 +      /* 82598 does not take a per-pool PSRTYPE write */
 +      if (hw->mac.type == ixgbe_mac_82598EB)
 +              return;
 +
 +      if (rss_i > 3)
 +              psrtype |= 2 << 29;
 +      else if (rss_i > 1)
 +              psrtype |= 1 << 29;
 +
 +      IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
 +}
 +
 +/**
 + * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 + * @rx_ring: ring to free buffers from
 + *
 + * Unmaps and frees every skb and page held by the ring's buffer_info
 + * array, zeroes the descriptor memory, and resets the ring indices.
 + * Safe to call on an already-cleaned ring (buffer_info == NULL).
 + **/
 +static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 +{
 +      struct device *dev = rx_ring->dev;
 +      unsigned long size;
 +      u16 i;
 +
 +      /* ring already cleared, nothing to do */
 +      if (!rx_ring->rx_buffer_info)
 +              return;
 +
 +      /* Free all the Rx ring sk_buffs */
 +      for (i = 0; i < rx_ring->count; i++) {
 +              struct ixgbe_rx_buffer *rx_buffer;
 +
 +              rx_buffer = &rx_ring->rx_buffer_info[i];
 +              if (rx_buffer->skb) {
 +                      struct sk_buff *skb = rx_buffer->skb;
 +                      /* the skb may still own a DMA-mapped page handed
 +                       * off during receive; unmap it before freeing
 +                       */
 +                      if (IXGBE_CB(skb)->page_released) {
 +                              dma_unmap_page(dev,
 +                                             IXGBE_CB(skb)->dma,
 +                                             ixgbe_rx_bufsz(rx_ring),
 +                                             DMA_FROM_DEVICE);
 +                              IXGBE_CB(skb)->page_released = false;
 +                      }
 +                      dev_kfree_skb(skb);
 +              }
 +              rx_buffer->skb = NULL;
 +              if (rx_buffer->dma)
 +                      dma_unmap_page(dev, rx_buffer->dma,
 +                                     ixgbe_rx_pg_size(rx_ring),
 +                                     DMA_FROM_DEVICE);
 +              rx_buffer->dma = 0;
 +              if (rx_buffer->page)
 +                      __free_pages(rx_buffer->page,
 +                                   ixgbe_rx_pg_order(rx_ring));
 +              rx_buffer->page = NULL;
 +      }
 +
 +      size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
 +      memset(rx_ring->rx_buffer_info, 0, size);
 +
 +      /* Zero out the descriptor ring */
 +      memset(rx_ring->desc, 0, rx_ring->size);
 +
 +      /* ring is empty again; restart all cursors from slot 0 */
 +      rx_ring->next_to_alloc = 0;
 +      rx_ring->next_to_clean = 0;
 +      rx_ring->next_to_use = 0;
 +}
 +
 +/* ixgbe_disable_fwd_ring - quiesce and drain one rx ring of a fwd pool
 + * @vadapter: forwarding accelerator owning the ring
 + * @rx_ring: rx ring to shut down
 + *
 + * Disables the queue, waits for in-flight DMA (10-20 ms sleep), masks
 + * its interrupt, frees all buffers, and detaches the accelerator.
 + */
 +static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
 +                                 struct ixgbe_ring *rx_ring)
 +{
 +      struct ixgbe_adapter *adapter = vadapter->real_adapter;
 +      int index = rx_ring->queue_index + vadapter->rx_base_queue;
 +
 +      /* shutdown specific queue receive and wait for dma to settle */
 +      ixgbe_disable_rx_queue(adapter, rx_ring);
 +      usleep_range(10000, 20000);
 +      ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
 +      ixgbe_clean_rx_ring(rx_ring);
 +      rx_ring->l2_accel_priv = NULL;
 +}
 +
 +/* ixgbe_fwd_ring_down - tear down the rings assigned to a fwd device
 + * @vdev: macvlan net_device being detached
 + * @accel: accelerator state holding the pool's base queue offsets
 + *
 + * Stops @vdev's queues, disables and drains each rx ring, and hands
 + * both rx and tx rings back to the PF netdev.  Always returns 0.
 + */
 +int ixgbe_fwd_ring_down(struct net_device *vdev,
 +                      struct ixgbe_fwd_adapter *accel)
 +{
 +      struct ixgbe_adapter *adapter = accel->real_adapter;
 +      unsigned int rxbase = accel->rx_base_queue;
 +      unsigned int txbase = accel->tx_base_queue;
 +      int i;
 +
 +      netif_tx_stop_all_queues(vdev);
 +
 +      for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
 +              ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
 +              adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
 +      }
 +
 +      /* NOTE(review): tx loop bound reuses num_rx_queues_per_pool —
 +       * assumes rx and tx queue counts per pool are equal; confirm
 +       */
 +      for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
 +              adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
 +              adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
 +      }
 +
 +
 +      return 0;
 +}
 +
 +/* ixgbe_fwd_ring_up - bind a pool's rings to a macvlan offload device
 + * @vdev: macvlan net_device to attach
 + * @accel: accelerator state (pool index, base queues filled in here)
 + *
 + * Drains the pool's rx rings, re-points rx/tx rings at @vdev,
 + * reconfigures the rx rings, sizes @vdev's real queue counts, installs
 + * its MAC filter, and programs PSRTYPE and the pool rx mode.
 + *
 + * Return: 0 on success or if the pool is not allocated; negative errno
 + * from netif_set_real_num_*_queues on failure (rings are torn back
 + * down via ixgbe_fwd_ring_down before returning).
 + */
 +static int ixgbe_fwd_ring_up(struct net_device *vdev,
 +                           struct ixgbe_fwd_adapter *accel)
 +{
 +      struct ixgbe_adapter *adapter = accel->real_adapter;
 +      unsigned int rxbase, txbase, queues;
 +      int i, baseq, err = 0;
 +
 +      /* pool bit not set in fwd_bitmask: nothing is allocated for this
 +       * accelerator, so there is nothing to bring up
 +       */
 +      if (!test_bit(accel->pool, &adapter->fwd_bitmask))
 +              return 0;
 +
 +      baseq = accel->pool * adapter->num_rx_queues_per_pool;
 +      netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
 +                 accel->pool, adapter->num_rx_pools,
 +                 baseq, baseq + adapter->num_rx_queues_per_pool,
 +                 adapter->fwd_bitmask);
 +
 +      accel->netdev = vdev;
 +      accel->rx_base_queue = rxbase = baseq;
 +      accel->tx_base_queue = txbase = baseq;
 +
 +      /* quiesce the rings before re-pointing them at vdev */
 +      for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
 +              ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
 +
 +      for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
 +              adapter->rx_ring[rxbase + i]->netdev = vdev;
 +              adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
 +              ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
 +      }
 +
 +      for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
 +              adapter->tx_ring[txbase + i]->netdev = vdev;
 +              adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
 +      }
 +
 +      /* expose no more queues than either the pool or vdev provides */
 +      queues = min_t(unsigned int,
 +                     adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
 +      err = netif_set_real_num_tx_queues(vdev, queues);
 +      if (err)
 +              goto fwd_queue_err;
 +
 +      err = netif_set_real_num_rx_queues(vdev, queues);
 +      if (err)
 +              goto fwd_queue_err;
 +
 +      if (is_valid_ether_addr(vdev->dev_addr))
 +              ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
 +
 +      ixgbe_fwd_psrtype(accel);
 +      ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
 +      return err;
 +fwd_queue_err:
 +      ixgbe_fwd_ring_down(vdev, accel);
 +      return err;
 +}
 +
 +/* ixgbe_configure_dfwd - re-attach all macvlan offload devices
 + * @adapter: board private structure
 + *
 + * Walks every upper device of the PF netdev and brings the forwarding
 + * rings back up for each macvlan that has accelerator state attached.
 + * Called from ixgbe_configure() so offloads survive a reinit.
 + */
 +static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
 +{
 +      struct net_device *upper;
 +      struct list_head *iter;
 +      int err;
 +
 +      netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
 +              if (netif_is_macvlan(upper)) {
 +                      struct macvlan_dev *dfwd = netdev_priv(upper);
 +                      struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
 +
 +                      if (dfwd->fwd_priv) {
 +                              err = ixgbe_fwd_ring_up(upper, vadapter);
 +                              /* NOTE(review): continue at loop tail is a
 +                               * no-op; err is otherwise unused here
 +                               */
 +                              if (err)
 +                                      continue;
 +                      }
 +              }
 +      }
 +}
 +
  static void ixgbe_configure(struct ixgbe_adapter *adapter)
  {
        struct ixgbe_hw *hw = &adapter->hw;
  #endif /* IXGBE_FCOE */
        ixgbe_configure_tx(adapter);
        ixgbe_configure_rx(adapter);
 +      ixgbe_configure_dfwd(adapter);
  }
  
  static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@@ -4555,8 -4315,6 +4555,8 @@@ static void ixgbe_setup_gpie(struct ixg
  static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
  {
        struct ixgbe_hw *hw = &adapter->hw;
 +      struct net_device *upper;
 +      struct list_head *iter;
        int err;
        u32 ctrl_ext;
  
        /* enable transmits */
        netif_tx_start_all_queues(adapter->netdev);
  
 +      /* enable any upper devices */
 +      netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
 +              if (netif_is_macvlan(upper)) {
 +                      struct macvlan_dev *vlan = netdev_priv(upper);
 +
 +                      if (vlan->fwd_priv)
 +                              netif_tx_start_all_queues(upper);
 +              }
 +      }
 +
        /* bring the link up in the watchdog, this could race with our first
         * link up interrupt but shouldn't be a problem */
        adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@@ -4700,6 -4448,59 +4700,6 @@@ void ixgbe_reset(struct ixgbe_adapter *
                ixgbe_ptp_reset(adapter);
  }
  
 -/**
 - * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 - * @rx_ring: ring to free buffers from
 - **/
 -static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 -{
 -      struct device *dev = rx_ring->dev;
 -      unsigned long size;
 -      u16 i;
 -
 -      /* ring already cleared, nothing to do */
 -      if (!rx_ring->rx_buffer_info)
 -              return;
 -
 -      /* Free all the Rx ring sk_buffs */
 -      for (i = 0; i < rx_ring->count; i++) {
 -              struct ixgbe_rx_buffer *rx_buffer;
 -
 -              rx_buffer = &rx_ring->rx_buffer_info[i];
 -              if (rx_buffer->skb) {
 -                      struct sk_buff *skb = rx_buffer->skb;
 -                      if (IXGBE_CB(skb)->page_released) {
 -                              dma_unmap_page(dev,
 -                                             IXGBE_CB(skb)->dma,
 -                                             ixgbe_rx_bufsz(rx_ring),
 -                                             DMA_FROM_DEVICE);
 -                              IXGBE_CB(skb)->page_released = false;
 -                      }
 -                      dev_kfree_skb(skb);
 -              }
 -              rx_buffer->skb = NULL;
 -              if (rx_buffer->dma)
 -                      dma_unmap_page(dev, rx_buffer->dma,
 -                                     ixgbe_rx_pg_size(rx_ring),
 -                                     DMA_FROM_DEVICE);
 -              rx_buffer->dma = 0;
 -              if (rx_buffer->page)
 -                      __free_pages(rx_buffer->page,
 -                                   ixgbe_rx_pg_order(rx_ring));
 -              rx_buffer->page = NULL;
 -      }
 -
 -      size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
 -      memset(rx_ring->rx_buffer_info, 0, size);
 -
 -      /* Zero out the descriptor ring */
 -      memset(rx_ring->desc, 0, rx_ring->size);
 -
 -      rx_ring->next_to_alloc = 0;
 -      rx_ring->next_to_clean = 0;
 -      rx_ring->next_to_use = 0;
 -}
 -
  /**
   * ixgbe_clean_tx_ring - Free Tx Buffers
   * @tx_ring: ring to be cleaned
@@@ -4777,8 -4578,6 +4777,8 @@@ void ixgbe_down(struct ixgbe_adapter *a
  {
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
 +      struct net_device *upper;
 +      struct list_head *iter;
        u32 rxctrl;
        int i;
  
        netif_carrier_off(netdev);
        netif_tx_disable(netdev);
  
 +      /* disable any upper devices */
 +      netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
 +              if (netif_is_macvlan(upper)) {
 +                      struct macvlan_dev *vlan = netdev_priv(upper);
 +
 +                      if (vlan->fwd_priv) {
 +                              netif_tx_stop_all_queues(upper);
 +                              netif_carrier_off(upper);
 +                              netif_tx_disable(upper);
 +                      }
 +              }
 +      }
 +
        ixgbe_irq_disable(adapter);
  
        ixgbe_napi_disable_all(adapter);
@@@ -5023,20 -4809,11 +5023,20 @@@ static int ixgbe_sw_init(struct ixgbe_a
        hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
  
  #ifdef CONFIG_PCI_IOV
 +      if (max_vfs > 0)
 +              e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
 +
        /* assign number of SR-IOV VFs */
 -      if (hw->mac.type != ixgbe_mac_82598EB)
 -              adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
 +      if (hw->mac.type != ixgbe_mac_82598EB) {
 +              if (max_vfs > 63) {
 +                      adapter->num_vfs = 0;
 +                      e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
 +              } else {
 +                      adapter->num_vfs = max_vfs;
 +              }
 +      }
 +#endif /* CONFIG_PCI_IOV */
  
 -#endif
        /* enable itr by default in dynamic mode */
        adapter->rx_itr_setting = 1;
        adapter->tx_itr_setting = 1;
                return -EIO;
        }
  
 +      /* PF holds first pool slot */
 +      set_bit(0, &adapter->fwd_bitmask);
        set_bit(__IXGBE_DOWN, &adapter->state);
  
        return 0;
@@@ -5361,7 -5136,7 +5361,7 @@@ static int ixgbe_change_mtu(struct net_
  static int ixgbe_open(struct net_device *netdev)
  {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 -      int err;
 +      int err, queues;
  
        /* disallow open during test */
        if (test_bit(__IXGBE_TESTING, &adapter->state))
                goto err_req_irq;
  
        /* Notify the stack of the actual queue counts. */
 -      err = netif_set_real_num_tx_queues(netdev,
 -                                         adapter->num_rx_pools > 1 ? 1 :
 -                                         adapter->num_tx_queues);
 +      if (adapter->num_rx_pools > 1)
 +              queues = adapter->num_rx_queues_per_pool;
 +      else
 +              queues = adapter->num_tx_queues;
 +
 +      err = netif_set_real_num_tx_queues(netdev, queues);
        if (err)
                goto err_set_queues;
  
 -
 -      err = netif_set_real_num_rx_queues(netdev,
 -                                         adapter->num_rx_pools > 1 ? 1 :
 -                                         adapter->num_rx_queues);
 +      if (adapter->num_rx_pools > 1 &&
 +          adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
 +              queues = IXGBE_MAX_L2A_QUEUES;
 +      else
 +              queues = adapter->num_rx_queues;
 +      err = netif_set_real_num_rx_queues(netdev, queues);
        if (err)
                goto err_set_queues;
  
@@@ -6223,16 -5993,8 +6223,16 @@@ static void ixgbe_sfp_link_config_subta
        adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
  
        speed = hw->phy.autoneg_advertised;
 -      if ((!speed) && (hw->mac.ops.get_link_capabilities))
 +      if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
                hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
 +
 +              /* setup the highest link when no autoneg */
 +              if (!autoneg) {
 +                      if (speed & IXGBE_LINK_SPEED_10GB_FULL)
 +                              speed = IXGBE_LINK_SPEED_10GB_FULL;
 +              }
 +      }
 +
        if (hw->mac.ops.setup_link)
                hw->mac.ops.setup_link(hw, speed, true);
  
@@@ -6990,9 -6752,8 +6990,9 @@@ out_drop
        return NETDEV_TX_OK;
  }
  
 -static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 -                                  struct net_device *netdev)
 +static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
 +                                    struct net_device *netdev,
 +                                    struct ixgbe_ring *ring)
  {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_ring *tx_ring;
                skb_set_tail_pointer(skb, 17);
        }
  
 -      tx_ring = adapter->tx_ring[skb->queue_mapping];
 +      tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
 +
        return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
  }
  
 +/* ndo_start_xmit entry point: no explicit ring, so __ixgbe_xmit_frame
 + * selects the tx ring from skb->queue_mapping
 + */
 +static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 +                                  struct net_device *netdev)
 +{
 +      return __ixgbe_xmit_frame(skb, netdev, NULL);
 +}
 +
  /**
   * ixgbe_set_mac - Change the Ethernet Address of the NIC
   * @netdev: network interface device structure
@@@ -7275,7 -7029,6 +7275,7 @@@ int ixgbe_setup_tc(struct net_device *d
  {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ixgbe_hw *hw = &adapter->hw;
 +      bool pools;
  
        /* Hardware supports up to 8 traffic classes */
        if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
             tc < MAX_TRAFFIC_CLASS))
                return -EINVAL;
  
 +      pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
 +      if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
 +              return -EBUSY;
 +
        /* Hardware has to reinitialize queues and interrupts to
         * match packet buffer alignment. Unfortunately, the
         * hardware is not flexible enough to do this dynamically.
@@@ -7541,104 -7290,6 +7541,104 @@@ static int ixgbe_ndo_bridge_getlink(str
        return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
  }
  
 +/* ixgbe_fwd_add - ndo_dfwd_add_station: offload a macvlan onto a pool
 + * @pdev: the PF (lower) net_device
 + * @vdev: the macvlan (upper) net_device to accelerate
 + *
 + * Validates queue counts and pool availability, claims a pool bit,
 + * reinitializes ring layout in VMDQ mode via ixgbe_setup_tc(), and
 + * brings the pool's rings up for @vdev.
 + *
 + * Return: accelerator cookie for later ndo_dfwd_* calls, or ERR_PTR
 + * (-EINVAL/-EBUSY/-ENOMEM or ring-up error) on failure; the claimed
 + * pool bit and counter are unwound on the error path.
 + */
 +static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 +{
 +      struct ixgbe_fwd_adapter *fwd_adapter = NULL;
 +      struct ixgbe_adapter *adapter = netdev_priv(pdev);
 +      unsigned int limit;
 +      int pool, err;
 +
 +#ifdef CONFIG_RPS
 +      if (vdev->num_rx_queues != vdev->num_tx_queues) {
 +              netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
 +                          vdev->name);
 +              return ERR_PTR(-EINVAL);
 +      }
 +#endif
 +      /* Check for hardware restriction on number of rx/tx queues */
 +      if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
 +          vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
 +              netdev_info(pdev,
 +                          "%s: Supports RX/TX Queue counts 1,2, and 4\n",
 +                          pdev->name);
 +              return ERR_PTR(-EINVAL);
 +      }
 +
 +      /* refuse when DCB or plain VMDQ pool limits are already reached */
 +      if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
 +            adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
 +          (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
 +              return ERR_PTR(-EBUSY);
 +
 +      /* NOTE(review): kcalloc(1, ...) is equivalent to kzalloc() */
 +      fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL);
 +      if (!fwd_adapter)
 +              return ERR_PTR(-ENOMEM);
 +
 +      /* claim the lowest free pool slot (bit 0 is held by the PF) */
 +      pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
 +      adapter->num_rx_pools++;
 +      set_bit(pool, &adapter->fwd_bitmask);
 +      limit = find_last_bit(&adapter->fwd_bitmask, 32);
 +
 +      /* Enable VMDq flag so device will be set in VM mode */
 +      adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
 +      adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
 +      adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
 +
 +      /* Force reinit of ring allocation with VMDQ enabled */
 +      err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
 +      if (err)
 +              goto fwd_add_err;
 +      fwd_adapter->pool = pool;
 +      fwd_adapter->real_adapter = adapter;
 +      err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
 +      if (err)
 +              goto fwd_add_err;
 +      netif_tx_start_all_queues(vdev);
 +      return fwd_adapter;
 +fwd_add_err:
 +      /* unwind counter and free adapter struct */
 +      netdev_info(pdev,
 +                  "%s: dfwd hardware acceleration failed\n", vdev->name);
 +      clear_bit(pool, &adapter->fwd_bitmask);
 +      adapter->num_rx_pools--;
 +      kfree(fwd_adapter);
 +      return ERR_PTR(err);
 +}
 +
 +/* ixgbe_fwd_del - ndo_dfwd_del_station: release a macvlan offload pool
 + * @pdev: the PF (lower) net_device
 + * @priv: accelerator cookie returned by ixgbe_fwd_add()
 + *
 + * Releases the pool bit, shrinks the VMDQ limit to the highest bit
 + * still set, tears the rings down, reinitializes ring layout via
 + * ixgbe_setup_tc(), and frees the accelerator state.
 + */
 +static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
 +{
 +      struct ixgbe_fwd_adapter *fwd_adapter = priv;
 +      struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
 +      unsigned int limit;
 +
 +      clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
 +      adapter->num_rx_pools--;
 +
 +      limit = find_last_bit(&adapter->fwd_bitmask, 32);
 +      adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
 +      ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
 +      ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
 +      netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
 +                 fwd_adapter->pool, adapter->num_rx_pools,
 +                 fwd_adapter->rx_base_queue,
 +                 fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
 +                 adapter->fwd_bitmask);
 +      kfree(fwd_adapter);
 +}
 +
 +/* ixgbe_fwd_xmit - ndo_dfwd_start_xmit: transmit on an offloaded pool
 + * @skb: buffer to send
 + * @dev: the macvlan net_device
 + * @priv: accelerator cookie from ixgbe_fwd_add()
 + *
 + * Maps skb->queue_mapping into the pool's tx ring range and hands the
 + * ring explicitly to __ixgbe_xmit_frame().
 + */
 +static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
 +                                struct net_device *dev,
 +                                void *priv)
 +{
 +      struct ixgbe_fwd_adapter *fwd_adapter = priv;
 +      unsigned int queue;
 +      struct ixgbe_ring *tx_ring;
 +
 +      queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
 +      tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
 +
 +      return __ixgbe_xmit_frame(skb, dev, tx_ring);
 +}
 +
  static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_open               = ixgbe_open,
        .ndo_stop               = ixgbe_close,
        .ndo_fdb_add            = ixgbe_ndo_fdb_add,
        .ndo_bridge_setlink     = ixgbe_ndo_bridge_setlink,
        .ndo_bridge_getlink     = ixgbe_ndo_bridge_getlink,
 +      .ndo_dfwd_add_station   = ixgbe_fwd_add,
 +      .ndo_dfwd_del_station   = ixgbe_fwd_del,
 +      .ndo_dfwd_start_xmit    = ixgbe_fwd_xmit,
  };
  
  /**
   **/
  static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
  {
 -      struct ixgbe_hw *hw = &adapter->hw;
        struct list_head *entry;
        int physfns = 0;
  
 -      /* Some cards can not use the generic count PCIe functions method, and
 -       * so must be hardcoded to the correct value.
 +      /* Some cards can not use the generic count PCIe functions method,
 +       * because they are behind a parent switch, so we hardcode these with
 +       * the correct number of functions.
         */
 -      switch (hw->device_id) {
 -      case IXGBE_DEV_ID_82599_SFP_SF_QP:
 -      case IXGBE_DEV_ID_82599_QSFP_SF_QP:
 +      if (ixgbe_pcie_from_parent(&adapter->hw)) {
                physfns = 4;
 -              break;
 -      default:
 +      } else {
                list_for_each(entry, &adapter->pdev->bus_list) {
                        struct pci_dev *pdev =
                                list_entry(entry, struct pci_dev, bus_list);
@@@ -7824,19 -7475,14 +7824,14 @@@ static int ixgbe_probe(struct pci_dev *
        if (err)
                return err;
  
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-           !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               dev_err(&pdev->dev,
-                                       "No usable DMA configuration, aborting\n");
-                               goto err_dma;
-                       }
+                       dev_err(&pdev->dev,
+                               "No usable DMA configuration, aborting\n");
+                       goto err_dma;
                }
                pci_using_dac = 0;
        }
@@@ -7987,8 -7633,7 +7982,8 @@@ skip_sriov
                           NETIF_F_TSO |
                           NETIF_F_TSO6 |
                           NETIF_F_RXHASH |
 -                         NETIF_F_RXCSUM;
 +                         NETIF_F_RXCSUM |
 +                         NETIF_F_HW_L2FW_DOFFLOAD;
  
        netdev->hw_features = netdev->features;
  
        if (ixgbe_pcie_from_parent(hw))
                ixgbe_get_parent_bus_info(adapter);
  
 -      /* print bus type/speed/width info */
 -      e_dev_info("(PCI Express:%s:%s) %pM\n",
 -                 (hw->bus.speed == ixgbe_bus_speed_8000 ? "8.0GT/s" :
 -                  hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
 -                  hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
 -                  "Unknown"),
 -                 (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
 -                  hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
 -                  hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
 -                  "Unknown"),
 -                 netdev->dev_addr);
 -
 -      err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
 -      if (err)
 -              strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
 -      if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
 -              e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
 -                         hw->mac.type, hw->phy.type, hw->phy.sfp_type,
 -                         part_str);
 -      else
 -              e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
 -                         hw->mac.type, hw->phy.type, part_str);
 -
        /* calculate the expected PCIe bandwidth required for optimal
         * performance. Note that some older parts will never have enough
         * bandwidth due to being older generation PCIe parts. We clamp these
        }
        ixgbe_check_minimum_link(adapter, expected_gts);
  
 +      err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
 +      if (err)
 +              strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
 +      if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
 +              e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
 +                         hw->mac.type, hw->phy.type, hw->phy.sfp_type,
 +                         part_str);
 +      else
 +              e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
 +                         hw->mac.type, hw->phy.type, part_str);
 +
 +      e_dev_info("%pM\n", netdev->dev_addr);
 +
        /* reset the hardware with the new settings */
        err = hw->mac.ops.start_hw(hw);
        if (err == IXGBE_ERR_EEPROM_VERSION) {
@@@ -58,7 -58,7 +58,7 @@@ const char ixgbevf_driver_name[] = "ixg
  static const char ixgbevf_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
  
 -#define DRV_VERSION "2.7.12-k"
 +#define DRV_VERSION "2.11.3-k"
  const char ixgbevf_driver_version[] = DRV_VERSION;
  static char ixgbevf_copyright[] =
        "Copyright (c) 2009 - 2012 Intel Corporation.";
@@@ -251,7 -251,7 +251,7 @@@ static bool ixgbevf_clean_tx_irq(struc
  
  #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
 -                   (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
 +                   (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
@@@ -299,30 -299,6 +299,30 @@@ static void ixgbevf_receive_skb(struct 
                netif_rx(skb);
  }
  
 +/**
 + * ixgbevf_rx_skb - Helper function to determine proper Rx method
 + * @q_vector: structure containing interrupt and ring information
 + * @skb: packet to send up
 + * @status: hardware indication of status of receive
 + * @rx_desc: rx descriptor
 + **/
 +static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
 +                         struct sk_buff *skb, u8 status,
 +                         union ixgbe_adv_rx_desc *rx_desc)
 +{
 +#ifdef CONFIG_NET_RX_BUSY_POLL
 +      skb_mark_napi_id(skb, &q_vector->napi);
 +
 +      /* a busy-polling owner consumes the skb directly and bypasses
 +       * the normal receive path below
 +       */
 +      if (ixgbevf_qv_busy_polling(q_vector)) {
 +              netif_receive_skb(skb);
 +              /* exit early if we busy polled */
 +              return;
 +      }
 +#endif /* CONFIG_NET_RX_BUSY_POLL */
 +
 +      ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
 +}
 +
  /**
   * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
   * @ring: pointer to Rx descriptor ring structure
@@@ -420,9 -396,9 +420,9 @@@ static inline void ixgbevf_irq_enable_q
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
  }
  
 -static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 -                               struct ixgbevf_ring *rx_ring,
 -                               int budget)
 +static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 +                              struct ixgbevf_ring *rx_ring,
 +                              int budget)
  {
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct pci_dev *pdev = adapter->pdev;
                total_rx_bytes += skb->len;
                total_rx_packets++;
  
 -              /*
 -               * Work around issue of some types of VM to VM loop back
 -               * packets not getting split correctly
 -               */
 -              if (staterr & IXGBE_RXD_STAT_LB) {
 -                      u32 header_fixup_len = skb_headlen(skb);
 -                      if (header_fixup_len < 14)
 -                              skb_push(skb, header_fixup_len);
 -              }
                skb->protocol = eth_type_trans(skb, rx_ring->netdev);
  
                /* Workaround hardware that can't do proper VEPA multicast
                        goto next_desc;
                }
  
 -              ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
 +              ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
  
  next_desc:
                rx_desc->wb.upper.status_error = 0;
        }
  
        rx_ring->next_to_clean = i;
 -      cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
 +      cleaned_count = ixgbevf_desc_unused(rx_ring);
  
        if (cleaned_count)
                ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
        q_vector->rx.total_packets += total_rx_packets;
        q_vector->rx.total_bytes += total_rx_bytes;
  
 -      return !!budget;
 +      return total_rx_packets;
  }
  
  /**
@@@ -564,11 -549,6 +564,11 @@@ static int ixgbevf_poll(struct napi_str
        ixgbevf_for_each_ring(ring, q_vector->tx)
                clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
  
 +#ifdef CONFIG_NET_RX_BUSY_POLL
 +      if (!ixgbevf_qv_lock_napi(q_vector))
 +              return budget;
 +#endif
 +
        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling */
        if (q_vector->rx.count > 1)
  
        adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
        ixgbevf_for_each_ring(ring, q_vector->rx)
 -              clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
 -                                                     per_ring_budget);
 +              clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
 +                                                      per_ring_budget)
 +                                 < per_ring_budget);
        adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
  
 +#ifdef CONFIG_NET_RX_BUSY_POLL
 +      ixgbevf_qv_unlock_napi(q_vector);
 +#endif
 +
        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
                return budget;
   * ixgbevf_write_eitr - write VTEITR register in hardware specific way
   * @q_vector: structure containing interrupt and ring information
   */
 -static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
 +void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
  {
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
  }
  
 +#ifdef CONFIG_NET_RX_BUSY_POLL
 +/* must be called with local_bh_disable()d */
 +static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
 +{
 +      struct ixgbevf_q_vector *q_vector =
 +                      container_of(napi, struct ixgbevf_q_vector, napi);
 +      struct ixgbevf_adapter *adapter = q_vector->adapter;
 +      struct ixgbevf_ring  *ring;
 +      int found = 0;
 +
 +      if (test_bit(__IXGBEVF_DOWN, &adapter->state))
 +              return LL_FLUSH_FAILED;
 +
 +      if (!ixgbevf_qv_lock_poll(q_vector))
 +              return LL_FLUSH_BUSY;
 +
 +      ixgbevf_for_each_ring(ring, q_vector->rx) {
 +              found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
 +#ifdef BP_EXTENDED_STATS
 +              if (found)
 +                      ring->bp_cleaned += found;
 +              else
 +                      ring->bp_misses++;
 +#endif
 +              if (found)
 +                      break;
 +      }
 +
 +      ixgbevf_qv_unlock_poll(q_vector);
 +
 +      return found;
 +}
 +#endif /* CONFIG_NET_RX_BUSY_POLL */
 +
  /**
   * ixgbevf_configure_msix - Configure MSI-X hardware
   * @adapter: board private structure
@@@ -815,12 -756,37 +815,12 @@@ static void ixgbevf_set_itr(struct ixgb
  static irqreturn_t ixgbevf_msix_other(int irq, void *data)
  {
        struct ixgbevf_adapter *adapter = data;
 -      struct pci_dev *pdev = adapter->pdev;
        struct ixgbe_hw *hw = &adapter->hw;
 -      u32 msg;
 -      bool got_ack = false;
  
        hw->mac.get_link_status = 1;
 -      if (!hw->mbx.ops.check_for_ack(hw))
 -              got_ack = true;
 -
 -      if (!hw->mbx.ops.check_for_msg(hw)) {
 -              hw->mbx.ops.read(hw, &msg, 1);
 -
 -              if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
 -                      mod_timer(&adapter->watchdog_timer,
 -                                round_jiffies(jiffies + 1));
 -                      adapter->link_up = false;
 -              }
  
 -              if (msg & IXGBE_VT_MSGTYPE_NACK)
 -                      dev_info(&pdev->dev,
 -                               "Last Request of type %2.2x to PF Nacked\n",
 -                               msg & 0xFF);
 -              hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
 -      }
 -
 -      /* checking for the ack clears the PFACK bit.  Place
 -       * it back in the v2p_mailbox cache so that anyone
 -       * polling for an ack will not miss it
 -       */
 -      if (got_ack)
 -              hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
 +      if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
 +              mod_timer(&adapter->watchdog_timer, jiffies);
  
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
  
@@@ -1141,21 -1107,6 +1141,21 @@@ static void ixgbevf_configure_srrctl(st
        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
  }
  
 +static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +
 +      /* PSRTYPE must be initialized in 82599 */
 +      u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
 +                    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
 +                    IXGBE_PSRTYPE_L2HDR;
 +
 +      if (adapter->num_rx_queues > 1)
 +              psrtype |= 1 << 29;
 +
 +      IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
 +}
 +
  static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
  {
        struct ixgbe_hw *hw = &adapter->hw;
@@@ -1203,7 -1154,8 +1203,7 @@@ static void ixgbevf_configure_rx(struc
        int i, j;
        u32 rdlen;
  
 -      /* PSRTYPE must be initialized in 82599 */
 -      IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
 +      ixgbevf_setup_psrtype(adapter);
  
        /* set_rx_buffer_len must be called before ring initialization */
        ixgbevf_set_rx_buffer_len(adapter);
@@@ -1341,9 -1293,6 +1341,9 @@@ static void ixgbevf_napi_enable_all(str
  
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
 +#ifdef CONFIG_NET_RX_BUSY_POLL
 +              ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
 +#endif
                napi_enable(&q_vector->napi);
        }
  }
@@@ -1357,12 -1306,6 +1357,12 @@@ static void ixgbevf_napi_disable_all(st
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
                napi_disable(&q_vector->napi);
 +#ifdef CONFIG_NET_RX_BUSY_POLL
 +              while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
 +                      pr_info("QV %d locked\n", q_idx);
 +                      usleep_range(1000, 20000);
 +              }
 +#endif /* CONFIG_NET_RX_BUSY_POLL */
        }
  }
  
@@@ -1380,55 -1323,31 +1380,55 @@@ static void ixgbevf_configure(struct ix
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct ixgbevf_ring *ring = &adapter->rx_ring[i];
                ixgbevf_alloc_rx_buffers(adapter, ring,
 -                                       IXGBE_DESC_UNUSED(ring));
 +                                       ixgbevf_desc_unused(ring));
        }
  }
  
 -#define IXGBE_MAX_RX_DESC_POLL 10
 -static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
 -                                              int rxr)
 +#define IXGBEVF_MAX_RX_DESC_POLL 10
 +static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
 +                                       int rxr)
  {
        struct ixgbe_hw *hw = &adapter->hw;
 +      int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
 +      u32 rxdctl;
        int j = adapter->rx_ring[rxr].reg_idx;
 -      int k;
  
 -      for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
 -              if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
 -                      break;
 -              else
 -                      msleep(1);
 -      }
 -      if (k >= IXGBE_MAX_RX_DESC_POLL) {
 -              hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
 -                     "not set within the polling period\n", rxr);
 -      }
 +      do {
 +              usleep_range(1000, 2000);
 +              rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
 +      } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 +
 +      if (!wait_loop)
 +              hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
 +                     rxr);
 +
 +      ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
 +                              (adapter->rx_ring[rxr].count - 1));
 +}
 +
 +static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
 +                                   struct ixgbevf_ring *ring)
 +{
 +      struct ixgbe_hw *hw = &adapter->hw;
 +      int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
 +      u32 rxdctl;
 +      u8 reg_idx = ring->reg_idx;
 +
 +      rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
 +      rxdctl &= ~IXGBE_RXDCTL_ENABLE;
 +
 +      /* write value back with RXDCTL.ENABLE bit cleared */
 +      IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
  
 -      ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
 -                              adapter->rx_ring[rxr].count - 1);
 +      /* the hardware may take up to 100us to really disable the rx queue */
 +      do {
 +              udelay(10);
 +              rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
 +      } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
 +
 +      if (!wait_loop)
 +              hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
 +                     reg_idx);
  }
  
  static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@@ -1626,6 -1545,8 +1626,6 @@@ void ixgbevf_up(struct ixgbevf_adapter 
  {
        struct ixgbe_hw *hw = &adapter->hw;
  
 -      ixgbevf_negotiate_api(adapter);
 -
        ixgbevf_reset_queues(adapter);
  
        ixgbevf_configure(adapter);
@@@ -1758,10 -1679,7 +1758,10 @@@ void ixgbevf_down(struct ixgbevf_adapte
  
        /* signal that we are down to the interrupt handler */
        set_bit(__IXGBEVF_DOWN, &adapter->state);
 -      /* disable receives */
 +
 +      /* disable all enabled rx queues */
 +      for (i = 0; i < adapter->num_rx_queues; i++)
 +              ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
  
        netif_tx_disable(netdev);
  
@@@ -1815,12 -1733,10 +1815,12 @@@ void ixgbevf_reset(struct ixgbevf_adapt
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
  
 -      if (hw->mac.ops.reset_hw(hw))
 +      if (hw->mac.ops.reset_hw(hw)) {
                hw_dbg(hw, "PF still resetting\n");
 -      else
 +      } else {
                hw->mac.ops.init_hw(hw);
 +              ixgbevf_negotiate_api(adapter);
 +      }
  
        if (is_valid_ether_addr(adapter->hw.mac.addr)) {
                memcpy(netdev->dev_addr, adapter->hw.mac.addr,
@@@ -2013,9 -1929,6 +2013,9 @@@ static int ixgbevf_alloc_q_vectors(stru
                q_vector->v_idx = q_idx;
                netif_napi_add(adapter->netdev, &q_vector->napi,
                               ixgbevf_poll, 64);
 +#ifdef CONFIG_NET_RX_BUSY_POLL
 +              napi_hash_add(&q_vector->napi);
 +#endif
                adapter->q_vector[q_idx] = q_vector;
        }
  
@@@ -2025,9 -1938,6 +2025,9 @@@ err_out
        while (q_idx) {
                q_idx--;
                q_vector = adapter->q_vector[q_idx];
 +#ifdef CONFIG_NET_RX_BUSY_POLL
 +              napi_hash_del(&q_vector->napi);
 +#endif
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
                adapter->q_vector[q_idx] = NULL;
@@@ -2051,9 -1961,6 +2051,9 @@@ static void ixgbevf_free_q_vectors(stru
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
  
                adapter->q_vector[q_idx] = NULL;
 +#ifdef CONFIG_NET_RX_BUSY_POLL
 +              napi_hash_del(&q_vector->napi);
 +#endif
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
        }
@@@ -2165,9 -2072,6 +2165,9 @@@ static int ixgbevf_sw_init(struct ixgbe
        hw->mac.max_tx_queues = 2;
        hw->mac.max_rx_queues = 2;
  
 +      /* lock to protect mailbox accesses */
 +      spin_lock_init(&adapter->mbx_lock);
 +
        err = hw->mac.ops.reset_hw(hw);
        if (err) {
                dev_info(&pdev->dev,
                        pr_err("init_shared_code failed: %d\n", err);
                        goto out;
                }
 +              ixgbevf_negotiate_api(adapter);
                err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
                if (err)
                        dev_info(&pdev->dev, "Error reading MAC address\n");
                memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
        }
  
 -      /* lock to protect mailbox accesses */
 -      spin_lock_init(&adapter->mbx_lock);
 -
        /* Enable dynamic interrupt throttling rates */
        adapter->rx_itr_setting = 1;
        adapter->tx_itr_setting = 1;
@@@ -2714,6 -2620,8 +2714,6 @@@ static int ixgbevf_open(struct net_devi
                }
        }
  
 -      ixgbevf_negotiate_api(adapter);
 -
        /* setup queue reg_idx and Rx queue count */
        err = ixgbevf_setup_queues(adapter);
        if (err)
@@@ -3102,7 -3010,7 +3102,7 @@@ static int __ixgbevf_maybe_stop_tx(stru
  
        /* We need to check again in a case another CPU has just
         * made room available. */
 -      if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
 +      if (likely(ixgbevf_desc_unused(tx_ring) < size))
                return -EBUSY;
  
        /* A reprieve! - use start_queue because it doesn't call schedule */
  
  static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
  {
 -      if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
 +      if (likely(ixgbevf_desc_unused(tx_ring) >= size))
                return 0;
        return __ixgbevf_maybe_stop_tx(tx_ring, size);
  }
@@@ -3308,8 -3216,6 +3308,8 @@@ static int ixgbevf_resume(struct pci_de
        }
        pci_set_master(pdev);
  
 +      ixgbevf_reset(adapter);
 +
        rtnl_lock();
        err = ixgbevf_init_interrupt_scheme(adapter);
        rtnl_unlock();
                return err;
        }
  
 -      ixgbevf_reset(adapter);
 -
        if (netif_running(netdev)) {
                err = ixgbevf_open(netdev);
                if (err)
@@@ -3385,9 -3293,6 +3385,9 @@@ static const struct net_device_ops ixgb
        .ndo_tx_timeout         = ixgbevf_tx_timeout,
        .ndo_vlan_rx_add_vid    = ixgbevf_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgbevf_vlan_rx_kill_vid,
 +#ifdef CONFIG_NET_RX_BUSY_POLL
 +      .ndo_busy_poll          = ixgbevf_busy_poll_recv,
 +#endif
  };
  
  static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@@ -3421,19 -3326,14 +3421,14 @@@ static int ixgbevf_probe(struct pci_de
        if (err)
                return err;
  
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-           !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               dev_err(&pdev->dev, "No usable DMA "
-                                       "configuration, aborting\n");
-                               goto err_dma;
-                       }
+                       dev_err(&pdev->dev, "No usable DMA "
+                               "configuration, aborting\n");
+                       goto err_dma;
                }
                pci_using_dac = 0;
        }
@@@ -1545,15 -1545,16 +1545,16 @@@ static int octeon_mgmt_probe(struct pla
  
        mac = of_get_mac_address(pdev->dev.of_node);
  
 -      if (mac && is_valid_ether_addr(mac))
 +      if (mac)
                memcpy(netdev->dev_addr, mac, ETH_ALEN);
        else
                eth_hw_addr_random(netdev);
  
        p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
  
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
-       pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (result)
+               goto err;
  
        netif_carrier_off(netdev);
        result = register_netdev(netdev);
diff --combined drivers/of/platform.c
@@@ -21,6 -21,7 +21,6 @@@
  #include <linux/of_device.h>
  #include <linux/of_irq.h>
  #include <linux/of_platform.h>
 -#include <linux/of_reserved_mem.h>
  #include <linux/platform_device.h>
  
  const struct of_device_id of_default_bus_match_table[] = {
@@@ -215,11 -216,11 +215,11 @@@ static struct platform_device *of_platf
        dev->archdata.dma_mask = 0xffffffffUL;
  #endif
        dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 +      if (!dev->dev.dma_mask)
 +              dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
        dev->dev.bus = &platform_bus_type;
        dev->dev.platform_data = platform_data;
  
 -      of_reserved_mem_device_init(&dev->dev);
 -
        /* We do not fill the DMA ops for platform devices by default.
         * This is currently the responsibility of the platform code
         * to do such, possibly using a device notifier
  
        if (of_device_add(dev) != 0) {
                platform_device_put(dev);
 -              of_reserved_mem_device_release(&dev->dev);
                return NULL;
        }
  
@@@ -282,9 -284,6 +282,6 @@@ static struct amba_device *of_amba_devi
        else
                of_device_make_bus_id(&dev->dev);
  
-       /* setup amba-specific device info */
-       dev->dma_mask = ~0;
        /* Allow the HW Peripheral ID to be overridden */
        prop = of_get_property(node, "arm,primecell-periphid", NULL);
        if (prop)
@@@ -100,13 -100,14 +100,14 @@@ static int dwc2_driver_probe(struct pla
         */
        if (!dev->dev.dma_mask)
                dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
-       if (!dev->dev.coherent_dma_mask)
-               dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       retval = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+       if (retval)
+               return retval;
  
        irq = platform_get_irq(dev, 0);
        if (irq < 0) {
                dev_err(&dev->dev, "missing IRQ resource\n");
 -              return -EINVAL;
 +              return irq;
        }
  
        res = platform_get_resource(dev, IORESOURCE_MEM, 0);
@@@ -493,8 -493,11 +493,8 @@@ struct et131x_adapter 
        spinlock_t send_hw_lock;
  
        spinlock_t rcv_lock;
 -      spinlock_t rcv_pend_lock;
        spinlock_t fbr_lock;
  
 -      spinlock_t phy_lock;
 -
        /* Packet Filter and look ahead size */
        u32 packet_filter;
  
@@@ -2774,9 -2777,10 +2774,9 @@@ static void et131x_handle_recv_interrup
                adapter->net_stats.rx_packets++;
  
                /* Set the status on the packet, either resources or success */
 -              if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
 -                      dev_warn(&adapter->pdev->dev,
 -                                  "RFD's are running out\n");
 -              }
 +              if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK)
 +                      dev_warn(&adapter->pdev->dev, "RFD's are running out\n");
 +
                count++;
        }
  
@@@ -2902,9 -2906,8 +2902,9 @@@ static int nic_send_packet(struct et131
         * number of fragments. If needed, we can call this function,
         * although it is less efficient.
         */
 -      if (nr_frags > 23)
 -              return -EIO;
 +
 +      /* nr_frags should be no more than 18. */
 +      BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);
  
        memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
  
@@@ -3097,10 -3100,11 +3097,10 @@@ static int send_packet(struct sk_buff *
                shbufva = (u16 *) skb->data;
  
                if ((shbufva[0] == 0xffff) &&
 -                  (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
 +                  (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff))
                        tcb->flags |= FMP_DEST_BROAD;
 -              } else if ((shbufva[0] & 0x3) == 0x0001) {
 +              else if ((shbufva[0] & 0x3) == 0x0001)
                        tcb->flags |=  FMP_DEST_MULTI;
 -              }
        }
  
        tcb->next = NULL;
@@@ -3922,7 -3926,9 +3922,7 @@@ static struct et131x_adapter *et131x_ad
        spin_lock_init(&adapter->tcb_ready_qlock);
        spin_lock_init(&adapter->send_hw_lock);
        spin_lock_init(&adapter->rcv_lock);
 -      spin_lock_init(&adapter->rcv_pend_lock);
        spin_lock_init(&adapter->fbr_lock);
 -      spin_lock_init(&adapter->phy_lock);
  
        adapter->registry_jumbo_packet = 1514;  /* 1514-9216 */
  
@@@ -4791,21 -4797,8 +4791,8 @@@ static int et131x_pci_setup(struct pci_
        pci_set_master(pdev);
  
        /* Check the DMA addressing support of this device */
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-               rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (rc < 0) {
-                       dev_err(&pdev->dev,
-                         "Unable to obtain 64 bit DMA for consistent allocations\n");
-                       goto err_release_res;
-               }
-       } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
-               rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-               if (rc < 0) {
-                       dev_err(&pdev->dev,
-                         "Unable to obtain 32 bit DMA for consistent allocations\n");
-                       goto err_release_res;
-               }
-       } else {
+       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
+           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
                dev_err(&pdev->dev, "No usable DMA addressing method\n");
                rc = -EIO;
                goto err_release_res;
@@@ -41,6 -41,7 +41,6 @@@ struct imx_drm_device 
        struct list_head                        encoder_list;
        struct list_head                        connector_list;
        struct mutex                            mutex;
 -      int                                     references;
        int                                     pipes;
        struct drm_fbdev_cma                    *fbhelper;
  };
@@@ -68,11 -69,6 +68,11 @@@ struct imx_drm_connector 
        struct module                           *owner;
  };
  
 +int imx_drm_crtc_id(struct imx_drm_crtc *crtc)
 +{
 +      return crtc->pipe;
 +}
 +
  static void imx_drm_driver_lastclose(struct drm_device *drm)
  {
        struct imx_drm_device *imxdrm = drm->dev_private;
@@@ -115,12 -111,18 +115,12 @@@ int imx_drm_crtc_panel_format_pins(stru
        struct imx_drm_crtc *imx_crtc;
        struct imx_drm_crtc_helper_funcs *helper;
  
 -      mutex_lock(&imxdrm->mutex);
 -
        list_for_each_entry(imx_crtc, &imxdrm->crtc_list, list)
                if (imx_crtc->crtc == crtc)
                        goto found;
  
 -      mutex_unlock(&imxdrm->mutex);
 -
        return -EINVAL;
  found:
 -      mutex_unlock(&imxdrm->mutex);
 -
        helper = &imx_crtc->imx_drm_helper_funcs;
        if (helper->set_interface_pix_fmt)
                return helper->set_interface_pix_fmt(crtc,
@@@ -190,18 -192,6 +190,18 @@@ static void imx_drm_disable_vblank(stru
        imx_drm_crtc->imx_drm_helper_funcs.disable_vblank(imx_drm_crtc->crtc);
  }
  
 +static void imx_drm_driver_preclose(struct drm_device *drm,
 +              struct drm_file *file)
 +{
 +      int i;
 +
 +      if (!file->is_master)
 +              return;
 +
 +      for (i = 0; i < 4; i++)
 +              imx_drm_disable_vblank(drm , i);
 +}
 +
  static const struct file_operations imx_drm_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
@@@ -251,6 -241,8 +251,6 @@@ struct drm_device *imx_drm_device_get(v
                }
        }
  
 -      imxdrm->references++;
 -
        return imxdrm->drm;
  
  unwind_crtc:
@@@ -288,6 -280,8 +288,6 @@@ void imx_drm_device_put(void
        list_for_each_entry(enc, &imxdrm->encoder_list, list)
                module_put(enc->owner);
  
 -      imxdrm->references--;
 -
        mutex_unlock(&imxdrm->mutex);
  }
  EXPORT_SYMBOL_GPL(imx_drm_device_put);
@@@ -491,7 -485,7 +491,7 @@@ int imx_drm_add_crtc(struct drm_crtc *c
  
        mutex_lock(&imxdrm->mutex);
  
 -      if (imxdrm->references) {
 +      if (imxdrm->drm->open_count) {
                ret = -EBUSY;
                goto err_busy;
        }
@@@ -570,7 -564,7 +570,7 @@@ int imx_drm_add_encoder(struct drm_enco
  
        mutex_lock(&imxdrm->mutex);
  
 -      if (imxdrm->references) {
 +      if (imxdrm->drm->open_count) {
                ret = -EBUSY;
                goto err_busy;
        }
@@@ -658,14 -652,20 +658,14 @@@ int imx_drm_encoder_get_mux_id(struct i
        struct imx_drm_crtc *imx_crtc;
        int i = 0;
  
 -      mutex_lock(&imxdrm->mutex);
 -
        list_for_each_entry(imx_crtc, &imxdrm->crtc_list, list) {
                if (imx_crtc->crtc == crtc)
                        goto found;
                i++;
        }
  
 -      mutex_unlock(&imxdrm->mutex);
 -
        return -EINVAL;
  found:
 -      mutex_unlock(&imxdrm->mutex);
 -
        return i;
  }
  EXPORT_SYMBOL_GPL(imx_drm_encoder_get_mux_id);
@@@ -709,7 -709,7 +709,7 @@@ int imx_drm_add_connector(struct drm_co
  
        mutex_lock(&imxdrm->mutex);
  
 -      if (imxdrm->references) {
 +      if (imxdrm->drm->open_count) {
                ret = -EBUSY;
                goto err_busy;
        }
@@@ -779,26 -779,16 +779,26 @@@ static const struct drm_ioctl_desc imx_
  };
  
  static struct drm_driver imx_drm_driver = {
 -      .driver_features        = DRIVER_MODESET | DRIVER_GEM,
 +      .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
        .load                   = imx_drm_driver_load,
        .unload                 = imx_drm_driver_unload,
        .lastclose              = imx_drm_driver_lastclose,
 +      .preclose               = imx_drm_driver_preclose,
        .gem_free_object        = drm_gem_cma_free_object,
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
        .dumb_create            = drm_gem_cma_dumb_create,
        .dumb_map_offset        = drm_gem_cma_dumb_map_offset,
        .dumb_destroy           = drm_gem_dumb_destroy,
  
 +      .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
 +      .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
 +      .gem_prime_import       = drm_gem_prime_import,
 +      .gem_prime_export       = drm_gem_prime_export,
 +      .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
 +      .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
 +      .gem_prime_vmap         = drm_gem_cma_prime_vmap,
 +      .gem_prime_vunmap       = drm_gem_cma_prime_vunmap,
 +      .gem_prime_mmap         = drm_gem_cma_prime_mmap,
        .get_vblank_counter     = drm_vblank_count,
        .enable_vblank          = imx_drm_enable_vblank,
        .disable_vblank         = imx_drm_disable_vblank,
  
  static int imx_drm_platform_probe(struct platform_device *pdev)
  {
+       int ret;
+       ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
        imx_drm_device->dev = &pdev->dev;
  
        return drm_platform_init(&imx_drm_driver, pdev);
@@@ -852,13 -848,11 +858,11 @@@ static int __init imx_drm_init(void
        INIT_LIST_HEAD(&imx_drm_device->encoder_list);
  
        imx_drm_pdev = platform_device_register_simple("imx-drm", -1, NULL, 0);
 -      if (!imx_drm_pdev) {
 -              ret = -EINVAL;
 +      if (IS_ERR(imx_drm_pdev)) {
 +              ret = PTR_ERR(imx_drm_pdev);
                goto err_pdev;
        }
  
-       imx_drm_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32),
        ret = platform_driver_register(&imx_drm_pdrv);
        if (ret)
                goto err_pdrv;
  #include <drm/drm_crtc_helper.h>
  #include <linux/fb.h>
  #include <linux/clk.h>
 +#include <linux/errno.h>
  #include <drm/drm_gem_cma_helper.h>
  #include <drm/drm_fb_cma_helper.h>
  
  #include "ipu-v3/imx-ipu-v3.h"
  #include "imx-drm.h"
 +#include "ipuv3-plane.h"
  
  #define DRIVER_DESC           "i.MX IPUv3 Graphics"
  
 -struct ipu_framebuffer {
 -      struct drm_framebuffer  base;
 -      void                    *virt;
 -      dma_addr_t              phys;
 -      size_t                  len;
 -};
 -
  struct ipu_crtc {
        struct device           *dev;
        struct drm_crtc         base;
        struct imx_drm_crtc     *imx_crtc;
 -      struct ipuv3_channel    *ipu_ch;
 +
 +      /* plane[0] is the full plane, plane[1] is the partial plane */
 +      struct ipu_plane        *plane[2];
 +
        struct ipu_dc           *dc;
 -      struct ipu_dp           *dp;
 -      struct dmfc_channel     *dmfc;
        struct ipu_di           *di;
        int                     enabled;
        struct drm_pending_vblank_event *page_flip_event;
  
  #define to_ipu_crtc(x) container_of(x, struct ipu_crtc, base)
  
 -static int calc_vref(struct drm_display_mode *mode)
 -{
 -      unsigned long htotal, vtotal;
 -
 -      htotal = mode->htotal;
 -      vtotal = mode->vtotal;
 -
 -      if (!htotal || !vtotal)
 -              return 60;
 -
 -      return mode->clock * 1000 / vtotal / htotal;
 -}
 -
 -static int calc_bandwidth(struct drm_display_mode *mode, unsigned int vref)
 -{
 -      return mode->hdisplay * mode->vdisplay * vref;
 -}
 -
  static void ipu_fb_enable(struct ipu_crtc *ipu_crtc)
  {
        if (ipu_crtc->enabled)
                return;
  
        ipu_di_enable(ipu_crtc->di);
 -      ipu_dmfc_enable_channel(ipu_crtc->dmfc);
 -      ipu_idmac_enable_channel(ipu_crtc->ipu_ch);
        ipu_dc_enable_channel(ipu_crtc->dc);
 -      if (ipu_crtc->dp)
 -              ipu_dp_enable_channel(ipu_crtc->dp);
 +      ipu_plane_enable(ipu_crtc->plane[0]);
  
        ipu_crtc->enabled = 1;
  }
@@@ -74,8 -99,11 +74,8 @@@ static void ipu_fb_disable(struct ipu_c
        if (!ipu_crtc->enabled)
                return;
  
 -      if (ipu_crtc->dp)
 -              ipu_dp_disable_channel(ipu_crtc->dp);
 +      ipu_plane_disable(ipu_crtc->plane[0]);
        ipu_dc_disable_channel(ipu_crtc->dc);
 -      ipu_idmac_disable_channel(ipu_crtc->ipu_ch);
 -      ipu_dmfc_disable_channel(ipu_crtc->dmfc);
        ipu_di_disable(ipu_crtc->di);
  
        ipu_crtc->enabled = 0;
@@@ -131,6 -159,33 +131,6 @@@ static const struct drm_crtc_funcs ipu_
        .page_flip = ipu_page_flip,
  };
  
 -static int ipu_drm_set_base(struct drm_crtc *crtc, int x, int y)
 -{
 -      struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
 -      struct drm_gem_cma_object *cma_obj;
 -      struct drm_framebuffer *fb = crtc->fb;
 -      unsigned long phys;
 -
 -      cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
 -      if (!cma_obj) {
 -              DRM_LOG_KMS("entry is null.\n");
 -              return -EFAULT;
 -      }
 -
 -      phys = cma_obj->paddr;
 -      phys += x * (fb->bits_per_pixel >> 3);
 -      phys += y * fb->pitches[0];
 -
 -      dev_dbg(ipu_crtc->dev, "%s: phys: 0x%lx\n", __func__, phys);
 -      dev_dbg(ipu_crtc->dev, "%s: xy: %dx%d\n", __func__, x, y);
 -
 -      ipu_cpmem_set_stride(ipu_get_cpmem(ipu_crtc->ipu_ch), fb->pitches[0]);
 -      ipu_cpmem_set_buffer(ipu_get_cpmem(ipu_crtc->ipu_ch),
 -                        0, phys);
 -
 -      return 0;
 -}
 -
  static int ipu_crtc_mode_set(struct drm_crtc *crtc,
                               struct drm_display_mode *orig_mode,
                               struct drm_display_mode *mode,
                               struct drm_framebuffer *old_fb)
  {
        struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
 -      struct drm_framebuffer *fb = ipu_crtc->base.fb;
        int ret;
        struct ipu_di_signal_cfg sig_cfg = {};
        u32 out_pixel_fmt;
 -      struct ipu_ch_param __iomem *cpmem = ipu_get_cpmem(ipu_crtc->ipu_ch);
 -      int bpp;
 -      u32 v4l2_fmt;
  
        dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__,
                        mode->hdisplay);
        dev_dbg(ipu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__,
                        mode->vdisplay);
  
 -      ipu_ch_param_zero(cpmem);
 -
 -      switch (fb->pixel_format) {
 -      case DRM_FORMAT_XRGB8888:
 -      case DRM_FORMAT_ARGB8888:
 -              v4l2_fmt = V4L2_PIX_FMT_RGB32;
 -              bpp = 32;
 -              break;
 -      case DRM_FORMAT_RGB565:
 -              v4l2_fmt = V4L2_PIX_FMT_RGB565;
 -              bpp = 16;
 -              break;
 -      case DRM_FORMAT_RGB888:
 -              v4l2_fmt = V4L2_PIX_FMT_RGB24;
 -              bpp = 24;
 -              break;
 -      default:
 -              dev_err(ipu_crtc->dev, "unsupported pixel format 0x%08x\n",
 -                              fb->pixel_format);
 -              return -EINVAL;
 -      }
 -
        out_pixel_fmt = ipu_crtc->interface_pix_fmt;
  
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                sig_cfg.Vsync_pol = 1;
  
        sig_cfg.enable_pol = 1;
 -      sig_cfg.clk_pol = 0;
 +      sig_cfg.clk_pol = 1;
        sig_cfg.width = mode->hdisplay;
        sig_cfg.height = mode->vdisplay;
        sig_cfg.pixel_fmt = out_pixel_fmt;
        sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
        sig_cfg.vsync_pin = ipu_crtc->di_vsync_pin;
  
 -      if (ipu_crtc->dp) {
 -              ret = ipu_dp_setup_channel(ipu_crtc->dp, IPUV3_COLORSPACE_RGB,
 -                              IPUV3_COLORSPACE_RGB);
 -              if (ret) {
 -                      dev_err(ipu_crtc->dev,
 -                              "initializing display processor failed with %d\n",
 -                              ret);
 -                      return ret;
 -              }
 -              ipu_dp_set_global_alpha(ipu_crtc->dp, 1, 0, 1);
 -      }
 -
        ret = ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di, sig_cfg.interlaced,
                        out_pixel_fmt, mode->hdisplay);
        if (ret) {
                return ret;
        }
  
 -      ipu_cpmem_set_resolution(cpmem, mode->hdisplay, mode->vdisplay);
 -      ipu_cpmem_set_fmt(cpmem, v4l2_fmt);
 -      ipu_cpmem_set_high_priority(ipu_crtc->ipu_ch);
 -
 -      ret = ipu_dmfc_init_channel(ipu_crtc->dmfc, mode->hdisplay);
 -      if (ret) {
 -              dev_err(ipu_crtc->dev,
 -                              "initializing dmfc channel failed with %d\n",
 -                              ret);
 -              return ret;
 -      }
 -
 -      ret = ipu_dmfc_alloc_bandwidth(ipu_crtc->dmfc,
 -                      calc_bandwidth(mode, calc_vref(mode)), 64);
 -      if (ret) {
 -              dev_err(ipu_crtc->dev,
 -                              "allocating dmfc bandwidth failed with %d\n",
 -                              ret);
 -              return ret;
 -      }
 -
 -      ipu_drm_set_base(crtc, x, y);
 -
 -      return 0;
 +      return ipu_plane_mode_set(ipu_crtc->plane[0], crtc, mode, crtc->fb,
 +                                0, 0, mode->hdisplay, mode->vdisplay,
 +                                x, y, mode->hdisplay, mode->vdisplay);
  }
  
  static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
@@@ -218,7 -332,7 +218,7 @@@ static irqreturn_t ipu_irq_handler(int 
  
        if (ipu_crtc->newfb) {
                ipu_crtc->newfb = NULL;
 -              ipu_drm_set_base(&ipu_crtc->base, 0, 0);
 +              ipu_plane_set_base(ipu_crtc->plane[0], ipu_crtc->base.fb, 0, 0);
                ipu_crtc_handle_pageflip(ipu_crtc);
        }
  
@@@ -256,6 -370,10 +256,6 @@@ static struct drm_crtc_helper_funcs ipu
  
  static int ipu_enable_vblank(struct drm_crtc *crtc)
  {
 -      struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
 -
 -      enable_irq(ipu_crtc->irq);
 -
        return 0;
  }
  
@@@ -263,8 -381,7 +263,8 @@@ static void ipu_disable_vblank(struct d
  {
        struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
  
 -      disable_irq(ipu_crtc->irq);
 +      ipu_crtc->page_flip_event = NULL;
 +      ipu_crtc->newfb = NULL;
  }
  
  static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, u32 encoder_type,
@@@ -301,8 -418,12 +301,8 @@@ static const struct imx_drm_crtc_helper
  
  static void ipu_put_resources(struct ipu_crtc *ipu_crtc)
  {
 -      if (!IS_ERR_OR_NULL(ipu_crtc->ipu_ch))
 -              ipu_idmac_put(ipu_crtc->ipu_ch);
 -      if (!IS_ERR_OR_NULL(ipu_crtc->dmfc))
 -              ipu_dmfc_put(ipu_crtc->dmfc);
 -      if (!IS_ERR_OR_NULL(ipu_crtc->dp))
 -              ipu_dp_put(ipu_crtc->dp);
 +      if (!IS_ERR_OR_NULL(ipu_crtc->dc))
 +              ipu_dc_put(ipu_crtc->dc);
        if (!IS_ERR_OR_NULL(ipu_crtc->di))
                ipu_di_put(ipu_crtc->di);
  }
@@@ -313,12 -434,32 +313,12 @@@ static int ipu_get_resources(struct ipu
        struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
        int ret;
  
 -      ipu_crtc->ipu_ch = ipu_idmac_get(ipu, pdata->dma[0]);
 -      if (IS_ERR(ipu_crtc->ipu_ch)) {
 -              ret = PTR_ERR(ipu_crtc->ipu_ch);
 -              goto err_out;
 -      }
 -
        ipu_crtc->dc = ipu_dc_get(ipu, pdata->dc);
        if (IS_ERR(ipu_crtc->dc)) {
                ret = PTR_ERR(ipu_crtc->dc);
                goto err_out;
        }
  
 -      ipu_crtc->dmfc = ipu_dmfc_get(ipu, pdata->dma[0]);
 -      if (IS_ERR(ipu_crtc->dmfc)) {
 -              ret = PTR_ERR(ipu_crtc->dmfc);
 -              goto err_out;
 -      }
 -
 -      if (pdata->dp >= 0) {
 -              ipu_crtc->dp = ipu_dp_get(ipu, pdata->dp);
 -              if (IS_ERR(ipu_crtc->dp)) {
 -                      ret = PTR_ERR(ipu_crtc->dp);
 -                      goto err_out;
 -              }
 -      }
 -
        ipu_crtc->di = ipu_di_get(ipu, pdata->di);
        if (IS_ERR(ipu_crtc->di)) {
                ret = PTR_ERR(ipu_crtc->di);
@@@ -336,9 -477,7 +336,9 @@@ static int ipu_crtc_init(struct ipu_crt
                struct ipu_client_platformdata *pdata)
  {
        struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
 +      int dp = -EINVAL;
        int ret;
 +      int id;
  
        ret = ipu_get_resources(ipu_crtc, pdata);
        if (ret) {
                goto err_put_resources;
        }
  
 -      ipu_crtc->irq = ipu_idmac_channel_irq(ipu, ipu_crtc->ipu_ch,
 -                      IPU_IRQ_EOF);
 +      if (pdata->dp >= 0)
 +              dp = IPU_DP_FLOW_SYNC_BG;
 +      id = imx_drm_crtc_id(ipu_crtc->imx_crtc);
 +      ipu_crtc->plane[0] = ipu_plane_init(ipu_crtc->base.dev, ipu,
 +                                          pdata->dma[0], dp, BIT(id), true);
 +      ret = ipu_plane_get_resources(ipu_crtc->plane[0]);
 +      if (ret) {
 +              dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n",
 +                      ret);
 +              goto err_remove_crtc;
 +      }
 +
 +      /* If this crtc is using the DP, add an overlay plane */
 +      if (pdata->dp >= 0 && pdata->dma[1] > 0) {
 +              ipu_crtc->plane[1] = ipu_plane_init(ipu_crtc->base.dev, ipu,
 +                                                  pdata->dma[1],
 +                                                  IPU_DP_FLOW_SYNC_FG,
 +                                                  BIT(id), false);
 +              if (IS_ERR(ipu_crtc->plane[1]))
 +                      ipu_crtc->plane[1] = NULL;
 +      }
 +
 +      ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
        ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, 0,
                        "imx_drm", ipu_crtc);
        if (ret < 0) {
                dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
 -              goto err_put_resources;
 +              goto err_put_plane_res;
        }
  
 -      disable_irq(ipu_crtc->irq);
 -
        return 0;
  
 +err_put_plane_res:
 +      ipu_plane_put_resources(ipu_crtc->plane[0]);
 +err_remove_crtc:
 +      imx_drm_remove_crtc(ipu_crtc->imx_crtc);
  err_put_resources:
        ipu_put_resources(ipu_crtc);
  
@@@ -407,7 -523,9 +407,9 @@@ static int ipu_drm_probe(struct platfor
        if (!pdata)
                return -EINVAL;
  
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
  
        ipu_crtc = devm_kzalloc(&pdev->dev, sizeof(*ipu_crtc), GFP_KERNEL);
        if (!ipu_crtc)
@@@ -430,7 -548,6 +432,7 @@@ static int ipu_drm_remove(struct platfo
  
        imx_drm_remove_crtc(ipu_crtc->imx_crtc);
  
 +      ipu_plane_put_resources(ipu_crtc->plane[0]);
        ipu_put_resources(ipu_crtc);
  
        return 0;
@@@ -108,17 -108,22 +108,16 @@@ static int ci_hdrc_imx_probe(struct pla
        }
  
        data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0);
 -      if (!IS_ERR(data->phy)) {
 -              ret = usb_phy_init(data->phy);
 -              if (ret) {
 -                      dev_err(&pdev->dev, "unable to init phy: %d\n", ret);
 -                      goto err_clk;
 -              }
 -      } else if (PTR_ERR(data->phy) == -EPROBE_DEFER) {
 -              ret = -EPROBE_DEFER;
 +      if (IS_ERR(data->phy)) {
 +              ret = PTR_ERR(data->phy);
                goto err_clk;
        }
  
        pdata.phy = data->phy;
  
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto err_clk;
  
        if (data->usbmisc_data) {
                ret = imx_usbmisc_init(data->usbmisc_data);
@@@ -169,6 -174,10 +168,6 @@@ static int ci_hdrc_imx_remove(struct pl
  
        pm_runtime_disable(&pdev->dev);
        ci_hdrc_remove_device(data->ci_pdev);
 -
 -      if (data->phy)
 -              usb_phy_shutdown(data->phy);
 -
        clk_disable_unprepare(data->clk);
  
        return 0;
@@@ -30,17 -30,13 +30,17 @@@ static const char hcd_name[] = "ehci-at
  static struct hc_driver __read_mostly ehci_atmel_hc_driver;
  
  /* interface and function clocks */
 -static struct clk *iclk, *fclk;
 +static struct clk *iclk, *fclk, *uclk;
  static int clocked;
  
  /*-------------------------------------------------------------------------*/
  
  static void atmel_start_clock(void)
  {
 +      if (IS_ENABLED(CONFIG_COMMON_CLK)) {
 +              clk_set_rate(uclk, 48000000);
 +              clk_prepare_enable(uclk);
 +      }
        clk_prepare_enable(iclk);
        clk_prepare_enable(fclk);
        clocked = 1;
@@@ -50,8 -46,6 +50,8 @@@ static void atmel_stop_clock(void
  {
        clk_disable_unprepare(fclk);
        clk_disable_unprepare(iclk);
 +      if (IS_ENABLED(CONFIG_COMMON_CLK))
 +              clk_disable_unprepare(uclk);
        clocked = 0;
  }
  
@@@ -96,10 -90,9 +96,9 @@@ static int ehci_atmel_drv_probe(struct 
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (retval)
+               goto fail_create_hcd;
  
        hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
        if (!hcd) {
                retval = -ENOENT;
                goto fail_request_resource;
        }
 +      if (IS_ENABLED(CONFIG_COMMON_CLK)) {
 +              uclk = devm_clk_get(&pdev->dev, "usb_clk");
 +              if (IS_ERR(uclk)) {
 +                      dev_err(&pdev->dev, "failed to get uclk\n");
 +                      retval = PTR_ERR(uclk);
 +                      goto fail_request_resource;
 +              }
 +      }
  
        ehci = hcd_to_ehci(hcd);
        /* registers start at offset 0x0 */
index 016352e,0000000..e97c198
mode 100644,000000..100644
--- /dev/null
@@@ -1,301 -1,0 +1,300 @@@
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 +/*
 + * SAMSUNG EXYNOS USB HOST EHCI Controller
 + *
 + * Copyright (C) 2011 Samsung Electronics Co.Ltd
 + * Author: Jingoo Han <jg1.han@samsung.com>
 + * Author: Joonyoung Shim <jy0922.shim@samsung.com>
 + *
 + * This program is free software; you can redistribute  it and/or modify it
 + * under  the terms of  the GNU General  Public License as published by the
 + * Free Software Foundation;  either version 2 of the  License, or (at your
 + * option) any later version.
 + *
 + */
 +
 +#include <linux/clk.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/io.h>
 +#include <linux/kernel.h>
 +#include <linux/module.h>
 +#include <linux/of.h>
 +#include <linux/of_gpio.h>
 +#include <linux/platform_device.h>
 +#include <linux/usb/phy.h>
 +#include <linux/usb/samsung_usb_phy.h>
 +#include <linux/usb.h>
 +#include <linux/usb/hcd.h>
 +#include <linux/usb/otg.h>
 +
 +#include "ehci.h"
 +
 +#define DRIVER_DESC "EHCI EXYNOS driver"
 +
 +#define EHCI_INSNREG00(base)                  (base + 0x90)
 +#define EHCI_INSNREG00_ENA_INCR16             (0x1 << 25)
 +#define EHCI_INSNREG00_ENA_INCR8              (0x1 << 24)
 +#define EHCI_INSNREG00_ENA_INCR4              (0x1 << 23)
 +#define EHCI_INSNREG00_ENA_INCRX_ALIGN                (0x1 << 22)
 +#define EHCI_INSNREG00_ENABLE_DMA_BURST       \
 +      (EHCI_INSNREG00_ENA_INCR16 | EHCI_INSNREG00_ENA_INCR8 | \
 +       EHCI_INSNREG00_ENA_INCR4 | EHCI_INSNREG00_ENA_INCRX_ALIGN)
 +
 +static const char hcd_name[] = "ehci-exynos";
 +static struct hc_driver __read_mostly exynos_ehci_hc_driver;
 +
 +struct exynos_ehci_hcd {
 +      struct clk *clk;
 +      struct usb_phy *phy;
 +      struct usb_otg *otg;
 +};
 +
 +#define to_exynos_ehci(hcd) (struct exynos_ehci_hcd *)(hcd_to_ehci(hcd)->priv)
 +
 +static void exynos_setup_vbus_gpio(struct platform_device *pdev)
 +{
 +      struct device *dev = &pdev->dev;
 +      int err;
 +      int gpio;
 +
 +      if (!dev->of_node)
 +              return;
 +
 +      gpio = of_get_named_gpio(dev->of_node, "samsung,vbus-gpio", 0);
 +      if (!gpio_is_valid(gpio))
 +              return;
 +
 +      err = devm_gpio_request_one(dev, gpio, GPIOF_OUT_INIT_HIGH,
 +                                  "ehci_vbus_gpio");
 +      if (err)
 +              dev_err(dev, "can't request ehci vbus gpio %d", gpio);
 +}
 +
 +static int exynos_ehci_probe(struct platform_device *pdev)
 +{
 +      struct exynos_ehci_hcd *exynos_ehci;
 +      struct usb_hcd *hcd;
 +      struct ehci_hcd *ehci;
 +      struct resource *res;
 +      struct usb_phy *phy;
 +      int irq;
 +      int err;
 +
 +      /*
 +       * Right now device-tree probed devices don't get dma_mask set.
 +       * Since shared usb code relies on it, set it here for now.
 +       * Once we move to full device tree support this will vanish off.
 +       */
++      err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
++      if (err)
++              return err;
 +
 +      exynos_setup_vbus_gpio(pdev);
 +
 +      hcd = usb_create_hcd(&exynos_ehci_hc_driver,
 +                           &pdev->dev, dev_name(&pdev->dev));
 +      if (!hcd) {
 +              dev_err(&pdev->dev, "Unable to create HCD\n");
 +              return -ENOMEM;
 +      }
 +      exynos_ehci = to_exynos_ehci(hcd);
 +
 +      if (of_device_is_compatible(pdev->dev.of_node,
 +                                      "samsung,exynos5440-ehci"))
 +              goto skip_phy;
 +
 +      phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
 +      if (IS_ERR(phy)) {
 +              usb_put_hcd(hcd);
 +              dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
 +              return -EPROBE_DEFER;
 +      } else {
 +              exynos_ehci->phy = phy;
 +              exynos_ehci->otg = phy->otg;
 +      }
 +
 +skip_phy:
 +
 +      exynos_ehci->clk = devm_clk_get(&pdev->dev, "usbhost");
 +
 +      if (IS_ERR(exynos_ehci->clk)) {
 +              dev_err(&pdev->dev, "Failed to get usbhost clock\n");
 +              err = PTR_ERR(exynos_ehci->clk);
 +              goto fail_clk;
 +      }
 +
 +      err = clk_prepare_enable(exynos_ehci->clk);
 +      if (err)
 +              goto fail_clk;
 +
 +      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 +      if (!res) {
 +              dev_err(&pdev->dev, "Failed to get I/O memory\n");
 +              err = -ENXIO;
 +              goto fail_io;
 +      }
 +
 +      hcd->rsrc_start = res->start;
 +      hcd->rsrc_len = resource_size(res);
 +      hcd->regs = devm_ioremap(&pdev->dev, res->start, hcd->rsrc_len);
 +      if (!hcd->regs) {
 +              dev_err(&pdev->dev, "Failed to remap I/O memory\n");
 +              err = -ENOMEM;
 +              goto fail_io;
 +      }
 +
 +      irq = platform_get_irq(pdev, 0);
 +      if (!irq) {
 +              dev_err(&pdev->dev, "Failed to get IRQ\n");
 +              err = -ENODEV;
 +              goto fail_io;
 +      }
 +
 +      if (exynos_ehci->otg)
 +              exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
 +
 +      if (exynos_ehci->phy)
 +              usb_phy_init(exynos_ehci->phy);
 +
 +      ehci = hcd_to_ehci(hcd);
 +      ehci->caps = hcd->regs;
 +
 +      /* DMA burst Enable */
 +      writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));
 +
 +      err = usb_add_hcd(hcd, irq, IRQF_SHARED);
 +      if (err) {
 +              dev_err(&pdev->dev, "Failed to add USB HCD\n");
 +              goto fail_add_hcd;
 +      }
 +
 +      platform_set_drvdata(pdev, hcd);
 +
 +      return 0;
 +
 +fail_add_hcd:
 +      if (exynos_ehci->phy)
 +              usb_phy_shutdown(exynos_ehci->phy);
 +fail_io:
 +      clk_disable_unprepare(exynos_ehci->clk);
 +fail_clk:
 +      usb_put_hcd(hcd);
 +      return err;
 +}
 +
 +static int exynos_ehci_remove(struct platform_device *pdev)
 +{
 +      struct usb_hcd *hcd = platform_get_drvdata(pdev);
 +      struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
 +
 +      usb_remove_hcd(hcd);
 +
 +      if (exynos_ehci->otg)
 +              exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
 +
 +      if (exynos_ehci->phy)
 +              usb_phy_shutdown(exynos_ehci->phy);
 +
 +      clk_disable_unprepare(exynos_ehci->clk);
 +
 +      usb_put_hcd(hcd);
 +
 +      return 0;
 +}
 +
 +#ifdef CONFIG_PM
 +static int exynos_ehci_suspend(struct device *dev)
 +{
 +      struct usb_hcd *hcd = dev_get_drvdata(dev);
 +      struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
 +
 +      bool do_wakeup = device_may_wakeup(dev);
 +      int rc;
 +
 +      rc = ehci_suspend(hcd, do_wakeup);
 +
 +      if (exynos_ehci->otg)
 +              exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
 +
 +      if (exynos_ehci->phy)
 +              usb_phy_shutdown(exynos_ehci->phy);
 +
 +      clk_disable_unprepare(exynos_ehci->clk);
 +
 +      return rc;
 +}
 +
 +static int exynos_ehci_resume(struct device *dev)
 +{
 +      struct usb_hcd *hcd = dev_get_drvdata(dev);
 +      struct exynos_ehci_hcd *exynos_ehci = to_exynos_ehci(hcd);
 +
 +      clk_prepare_enable(exynos_ehci->clk);
 +
 +      if (exynos_ehci->otg)
 +              exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
 +
 +      if (exynos_ehci->phy)
 +              usb_phy_init(exynos_ehci->phy);
 +
 +      /* DMA burst Enable */
 +      writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));
 +
 +      ehci_resume(hcd, false);
 +      return 0;
 +}
 +#else
 +#define exynos_ehci_suspend   NULL
 +#define exynos_ehci_resume    NULL
 +#endif
 +
 +static const struct dev_pm_ops exynos_ehci_pm_ops = {
 +      .suspend        = exynos_ehci_suspend,
 +      .resume         = exynos_ehci_resume,
 +};
 +
 +#ifdef CONFIG_OF
 +static const struct of_device_id exynos_ehci_match[] = {
 +      { .compatible = "samsung,exynos4210-ehci" },
 +      { .compatible = "samsung,exynos5440-ehci" },
 +      {},
 +};
 +MODULE_DEVICE_TABLE(of, exynos_ehci_match);
 +#endif
 +
 +static struct platform_driver exynos_ehci_driver = {
 +      .probe          = exynos_ehci_probe,
 +      .remove         = exynos_ehci_remove,
 +      .shutdown       = usb_hcd_platform_shutdown,
 +      .driver = {
 +              .name   = "exynos-ehci",
 +              .owner  = THIS_MODULE,
 +              .pm     = &exynos_ehci_pm_ops,
 +              .of_match_table = of_match_ptr(exynos_ehci_match),
 +      }
 +};
 +static const struct ehci_driver_overrides exynos_overrides __initdata = {
 +      .extra_priv_size = sizeof(struct exynos_ehci_hcd),
 +};
 +
 +static int __init ehci_exynos_init(void)
 +{
 +      if (usb_disabled())
 +              return -ENODEV;
 +
 +      pr_info("%s: " DRIVER_DESC "\n", hcd_name);
 +      ehci_init_driver(&exynos_ehci_hc_driver, &exynos_overrides);
 +      return platform_driver_register(&exynos_ehci_driver);
 +}
 +module_init(ehci_exynos_init);
 +
 +static void __exit ehci_exynos_cleanup(void)
 +{
 +      platform_driver_unregister(&exynos_ehci_driver);
 +}
 +module_exit(ehci_exynos_cleanup);
 +
 +MODULE_DESCRIPTION(DRIVER_DESC);
 +MODULE_ALIAS("platform:exynos-ehci");
 +MODULE_AUTHOR("Jingoo Han");
 +MODULE_AUTHOR("Joonyoung Shim");
 +MODULE_LICENSE("GPL v2");
@@@ -362,10 -362,9 +362,9 @@@ static int tegra_ehci_probe(struct plat
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (err)
+               return err;
  
        hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
                                        dev_name(&pdev->dev));
  
        err = clk_prepare_enable(tegra->clk);
        if (err)
 -              goto cleanup_clk_get;
 +              goto cleanup_hcd_create;
  
        tegra_periph_reset_assert(tegra->clk);
        udelay(1);
@@@ -465,6 -464,8 +464,6 @@@ cleanup_phy
        usb_phy_shutdown(hcd->phy);
  cleanup_clk_en:
        clk_disable_unprepare(tegra->clk);
 -cleanup_clk_get:
 -      clk_put(tegra->clk);
  cleanup_hcd_create:
        usb_put_hcd(hcd);
        return err;
   */
  
  #include <linux/clk.h>
 -#include <linux/platform_device.h>
 +#include <linux/dma-mapping.h>
  #include <linux/of_platform.h>
  #include <linux/of_gpio.h>
 +#include <linux/platform_device.h>
  #include <linux/platform_data/atmel.h>
 +#include <linux/io.h>
 +#include <linux/kernel.h>
 +#include <linux/module.h>
 +#include <linux/usb.h>
 +#include <linux/usb/hcd.h>
  
  #include <mach/hardware.h>
  #include <asm/gpio.h>
  
  #include <mach/cpu.h>
  
 -#ifndef CONFIG_ARCH_AT91
 -#error "CONFIG_ARCH_AT91 must be defined."
 -#endif
 +
 +#include "ohci.h"
  
  #define valid_port(index)     ((index) >= 0 && (index) < AT91_MAX_USBH_PORTS)
  #define at91_for_each_port(index)     \
  
  /* interface, function and usb clocks; sometimes also an AHB clock */
  static struct clk *iclk, *fclk, *uclk, *hclk;
 +/* interface and function clocks; sometimes also an AHB clock */
 +
 +#define DRIVER_DESC "OHCI Atmel driver"
 +
 +static const char hcd_name[] = "ohci-atmel";
 +
 +static struct hc_driver __read_mostly ohci_at91_hc_driver;
  static int clocked;
 +static int (*orig_ohci_hub_control)(struct usb_hcd  *hcd, u16 typeReq,
 +                      u16 wValue, u16 wIndex, char *buf, u16 wLength);
 +static int (*orig_ohci_hub_status_data)(struct usb_hcd *hcd, char *buf);
  
  extern int usb_disabled(void);
  
@@@ -132,8 -117,6 +132,8 @@@ static void usb_hcd_at91_remove (struc
  static int usb_hcd_at91_probe(const struct hc_driver *driver,
                        struct platform_device *pdev)
  {
 +      struct at91_usbh_data *board;
 +      struct ohci_hcd *ohci;
        int retval;
        struct usb_hcd *hcd = NULL;
  
                }
        }
  
 +      board = hcd->self.controller->platform_data;
 +      ohci = hcd_to_ohci(hcd);
 +      ohci->num_ports = board->ports;
        at91_start_hc(pdev);
 -      ohci_hcd_init(hcd_to_ohci(hcd));
  
        retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
        if (retval == 0)
@@@ -257,6 -238,36 +257,6 @@@ static void usb_hcd_at91_remove(struct 
  }
  
  /*-------------------------------------------------------------------------*/
 -
 -static int
 -ohci_at91_reset (struct usb_hcd *hcd)
 -{
 -      struct at91_usbh_data   *board = dev_get_platdata(hcd->self.controller);
 -      struct ohci_hcd         *ohci = hcd_to_ohci (hcd);
 -      int                     ret;
 -
 -      if ((ret = ohci_init(ohci)) < 0)
 -              return ret;
 -
 -      ohci->num_ports = board->ports;
 -      return 0;
 -}
 -
 -static int
 -ohci_at91_start (struct usb_hcd *hcd)
 -{
 -      struct ohci_hcd         *ohci = hcd_to_ohci (hcd);
 -      int                     ret;
 -
 -      if ((ret = ohci_run(ohci)) < 0) {
 -              dev_err(hcd->self.controller, "can't start %s\n",
 -                      hcd->self.bus_name);
 -              ohci_stop(hcd);
 -              return ret;
 -      }
 -      return 0;
 -}
 -
  static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int enable)
  {
        if (!valid_port(port))
@@@ -286,8 -297,8 +286,8 @@@ static int ohci_at91_usb_get_power(stru
   */
  static int ohci_at91_hub_status_data(struct usb_hcd *hcd, char *buf)
  {
 -      struct at91_usbh_data *pdata = dev_get_platdata(hcd->self.controller);
 -      int length = ohci_hub_status_data(hcd, buf);
 +      struct at91_usbh_data *pdata = hcd->self.controller->platform_data;
 +      int length = orig_ohci_hub_status_data(hcd, buf);
        int port;
  
        at91_for_each_port(port) {
@@@ -365,8 -376,7 +365,8 @@@ static int ohci_at91_hub_control(struc
                break;
        }
  
 -      ret = ohci_hub_control(hcd, typeReq, wValue, wIndex + 1, buf, wLength);
 +      ret = orig_ohci_hub_control(hcd, typeReq, wValue, wIndex + 1,
 +                              buf, wLength);
        if (ret)
                goto out;
  
  
  /*-------------------------------------------------------------------------*/
  
 -static const struct hc_driver ohci_at91_hc_driver = {
 -      .description =          hcd_name,
 -      .product_desc =         "AT91 OHCI",
 -      .hcd_priv_size =        sizeof(struct ohci_hcd),
 -
 -      /*
 -       * generic hardware linkage
 -       */
 -      .irq =                  ohci_irq,
 -      .flags =                HCD_USB11 | HCD_MEMORY,
 -
 -      /*
 -       * basic lifecycle operations
 -       */
 -      .reset =                ohci_at91_reset,
 -      .start =                ohci_at91_start,
 -      .stop =                 ohci_stop,
 -      .shutdown =             ohci_shutdown,
 -
 -      /*
 -       * managing i/o requests and associated device resources
 -       */
 -      .urb_enqueue =          ohci_urb_enqueue,
 -      .urb_dequeue =          ohci_urb_dequeue,
 -      .endpoint_disable =     ohci_endpoint_disable,
 -
 -      /*
 -       * scheduling support
 -       */
 -      .get_frame_number =     ohci_get_frame,
 -
 -      /*
 -       * root hub support
 -       */
 -      .hub_status_data =      ohci_at91_hub_status_data,
 -      .hub_control =          ohci_at91_hub_control,
 -#ifdef CONFIG_PM
 -      .bus_suspend =          ohci_bus_suspend,
 -      .bus_resume =           ohci_bus_resume,
 -#endif
 -      .start_port_reset =     ohci_start_port_reset,
 -};
 -
 -/*-------------------------------------------------------------------------*/
 -
  static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
  {
        struct platform_device *pdev = data;
@@@ -469,7 -524,7 +469,7 @@@ MODULE_DEVICE_TABLE(of, at91_ohci_dt_id
  static int ohci_at91_of_init(struct platform_device *pdev)
  {
        struct device_node *np = pdev->dev.of_node;
-       int i, gpio;
+       int i, gpio, ret;
        enum of_gpio_flags flags;
        struct at91_usbh_data   *pdata;
        u32 ports;
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
  
        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
@@@ -648,11 -702,7 +647,11 @@@ ohci_hcd_at91_drv_suspend(struct platfo
         * REVISIT: some boards will be able to turn VBUS off...
         */
        if (at91_suspend_entering_slow_clock()) {
 -              ohci_usb_reset (ohci);
 +              ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
 +              ohci->hc_control &= OHCI_CTRL_RWC;
 +              ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
 +              ohci->rh_state = OHCI_RH_HALTED;
 +
                /* flush the writes */
                (void) ohci_readl (ohci, &ohci->regs->control);
                at91_stop_clock();
@@@ -679,6 -729,8 +678,6 @@@ static int ohci_hcd_at91_drv_resume(str
  #define ohci_hcd_at91_drv_resume  NULL
  #endif
  
 -MODULE_ALIAS("platform:at91_ohci");
 -
  static struct platform_driver ohci_hcd_at91_driver = {
        .probe          = ohci_hcd_at91_drv_probe,
        .remove         = ohci_hcd_at91_drv_remove,
                .of_match_table = of_match_ptr(at91_ohci_dt_ids),
        },
  };
 +
 +static int __init ohci_at91_init(void)
 +{
 +      if (usb_disabled())
 +              return -ENODEV;
 +
 +      pr_info("%s: " DRIVER_DESC "\n", hcd_name);
 +      ohci_init_driver(&ohci_at91_hc_driver, NULL);
 +
 +      /*
 +       * The Atmel HW has some unusual quirks, which require Atmel-specific
 +       * workarounds. We override certain hc_driver functions here to
 +       * achieve that. We explicitly do not enhance ohci_driver_overrides to
 +       * allow this more easily, since this is an unusual case, and we don't
 +       * want to encourage others to override these functions by making it
 +       * too easy.
 +       */
 +
 +      orig_ohci_hub_control = ohci_at91_hc_driver.hub_control;
 +      orig_ohci_hub_status_data = ohci_at91_hc_driver.hub_status_data;
 +
 +      ohci_at91_hc_driver.hub_status_data     = ohci_at91_hub_status_data;
 +      ohci_at91_hc_driver.hub_control         = ohci_at91_hub_control;
 +
 +      return platform_driver_register(&ohci_hcd_at91_driver);
 +}
 +module_init(ohci_at91_init);
 +
 +static void __exit ohci_at91_cleanup(void)
 +{
 +      platform_driver_unregister(&ohci_hcd_at91_driver);
 +}
 +module_exit(ohci_at91_cleanup);
 +
 +MODULE_DESCRIPTION(DRIVER_DESC);
 +MODULE_LICENSE("GPL");
 +MODULE_ALIAS("platform:at91_ohci");
   */
  
  #include <linux/clk.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/io.h>
 +#include <linux/kernel.h>
 +#include <linux/module.h>
  #include <linux/of.h>
  #include <linux/platform_device.h>
 -#include <linux/platform_data/usb-ohci-exynos.h>
  #include <linux/usb/phy.h>
  #include <linux/usb/samsung_usb_phy.h>
 +#include <linux/usb.h>
 +#include <linux/usb/hcd.h>
 +#include <linux/usb/otg.h>
 +
 +#include "ohci.h"
 +
 +#define DRIVER_DESC "OHCI EXYNOS driver"
 +
 +static const char hcd_name[] = "ohci-exynos";
 +static struct hc_driver __read_mostly exynos_ohci_hc_driver;
 +
 +#define to_exynos_ohci(hcd) (struct exynos_ohci_hcd *)(hcd_to_ohci(hcd)->priv)
  
  struct exynos_ohci_hcd {
 -      struct device *dev;
 -      struct usb_hcd *hcd;
        struct clk *clk;
        struct usb_phy *phy;
        struct usb_otg *otg;
 -      struct exynos4_ohci_platdata *pdata;
  };
  
 -static void exynos_ohci_phy_enable(struct exynos_ohci_hcd *exynos_ohci)
 +static void exynos_ohci_phy_enable(struct platform_device *pdev)
  {
 -      struct platform_device *pdev = to_platform_device(exynos_ohci->dev);
 +      struct usb_hcd *hcd = platform_get_drvdata(pdev);
 +      struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
  
        if (exynos_ohci->phy)
                usb_phy_init(exynos_ohci->phy);
 -      else if (exynos_ohci->pdata && exynos_ohci->pdata->phy_init)
 -              exynos_ohci->pdata->phy_init(pdev, USB_PHY_TYPE_HOST);
  }
  
 -static void exynos_ohci_phy_disable(struct exynos_ohci_hcd *exynos_ohci)
 +static void exynos_ohci_phy_disable(struct platform_device *pdev)
  {
 -      struct platform_device *pdev = to_platform_device(exynos_ohci->dev);
 +      struct usb_hcd *hcd = platform_get_drvdata(pdev);
 +      struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
  
        if (exynos_ohci->phy)
                usb_phy_shutdown(exynos_ohci->phy);
 -      else if (exynos_ohci->pdata && exynos_ohci->pdata->phy_exit)
 -              exynos_ohci->pdata->phy_exit(pdev, USB_PHY_TYPE_HOST);
 -}
 -
 -static int ohci_exynos_reset(struct usb_hcd *hcd)
 -{
 -      return ohci_init(hcd_to_ohci(hcd));
  }
  
 -static int ohci_exynos_start(struct usb_hcd *hcd)
 -{
 -      struct ohci_hcd *ohci = hcd_to_ohci(hcd);
 -      int ret;
 -
 -      ohci_dbg(ohci, "ohci_exynos_start, ohci:%p", ohci);
 -
 -      ret = ohci_run(ohci);
 -      if (ret < 0) {
 -              dev_err(hcd->self.controller, "can't start %s\n",
 -                      hcd->self.bus_name);
 -              ohci_stop(hcd);
 -              return ret;
 -      }
 -
 -      return 0;
 -}
 -
 -static const struct hc_driver exynos_ohci_hc_driver = {
 -      .description            = hcd_name,
 -      .product_desc           = "EXYNOS OHCI Host Controller",
 -      .hcd_priv_size          = sizeof(struct ohci_hcd),
 -
 -      .irq                    = ohci_irq,
 -      .flags                  = HCD_MEMORY|HCD_USB11,
 -
 -      .reset                  = ohci_exynos_reset,
 -      .start                  = ohci_exynos_start,
 -      .stop                   = ohci_stop,
 -      .shutdown               = ohci_shutdown,
 -
 -      .get_frame_number       = ohci_get_frame,
 -
 -      .urb_enqueue            = ohci_urb_enqueue,
 -      .urb_dequeue            = ohci_urb_dequeue,
 -      .endpoint_disable       = ohci_endpoint_disable,
 -
 -      .hub_status_data        = ohci_hub_status_data,
 -      .hub_control            = ohci_hub_control,
 -#ifdef        CONFIG_PM
 -      .bus_suspend            = ohci_bus_suspend,
 -      .bus_resume             = ohci_bus_resume,
 -#endif
 -      .start_port_reset       = ohci_start_port_reset,
 -};
 -
  static int exynos_ohci_probe(struct platform_device *pdev)
  {
 -      struct exynos4_ohci_platdata *pdata = dev_get_platdata(&pdev->dev);
        struct exynos_ohci_hcd *exynos_ohci;
        struct usb_hcd *hcd;
 -      struct ohci_hcd *ohci;
        struct resource *res;
        struct usb_phy *phy;
        int irq;
         * Since shared usb code relies on it, set it here for now.
         * Once we move to full device tree support this will vanish off.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (err)
+               return err;
  
 -      exynos_ohci = devm_kzalloc(&pdev->dev, sizeof(struct exynos_ohci_hcd),
 -                                      GFP_KERNEL);
 -      if (!exynos_ohci)
 +      hcd = usb_create_hcd(&exynos_ohci_hc_driver,
 +                              &pdev->dev, dev_name(&pdev->dev));
 +      if (!hcd) {
 +              dev_err(&pdev->dev, "Unable to create HCD\n");
                return -ENOMEM;
 +      }
 +
 +      exynos_ohci = to_exynos_ohci(hcd);
  
        if (of_device_is_compatible(pdev->dev.of_node,
                                        "samsung,exynos5440-ohci"))
  
        phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
        if (IS_ERR(phy)) {
 -              /* Fallback to pdata */
 -              if (!pdata) {
 -                      dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
 -                      return -EPROBE_DEFER;
 -              } else {
 -                      exynos_ohci->pdata = pdata;
 -              }
 +              usb_put_hcd(hcd);
 +              dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
 +              return -EPROBE_DEFER;
        } else {
                exynos_ohci->phy = phy;
                exynos_ohci->otg = phy->otg;
        }
  
  skip_phy:
 -
 -      exynos_ohci->dev = &pdev->dev;
 -
 -      hcd = usb_create_hcd(&exynos_ohci_hc_driver, &pdev->dev,
 -                                      dev_name(&pdev->dev));
 -      if (!hcd) {
 -              dev_err(&pdev->dev, "Unable to create HCD\n");
 -              return -ENOMEM;
 -      }
 -
 -      exynos_ohci->hcd = hcd;
        exynos_ohci->clk = devm_clk_get(&pdev->dev, "usbhost");
  
        if (IS_ERR(exynos_ohci->clk)) {
        }
  
        if (exynos_ohci->otg)
 -              exynos_ohci->otg->set_host(exynos_ohci->otg,
 -                                      &exynos_ohci->hcd->self);
 +              exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
  
 -      exynos_ohci_phy_enable(exynos_ohci);
 +      platform_set_drvdata(pdev, hcd);
  
 -      ohci = hcd_to_ohci(hcd);
 -      ohci_hcd_init(ohci);
 +      exynos_ohci_phy_enable(pdev);
  
        err = usb_add_hcd(hcd, irq, IRQF_SHARED);
        if (err) {
                dev_err(&pdev->dev, "Failed to add USB HCD\n");
                goto fail_add_hcd;
        }
 -
 -      platform_set_drvdata(pdev, exynos_ohci);
 -
        return 0;
  
  fail_add_hcd:
 -      exynos_ohci_phy_disable(exynos_ohci);
 +      exynos_ohci_phy_disable(pdev);
  fail_io:
        clk_disable_unprepare(exynos_ohci->clk);
  fail_clk:
  
  static int exynos_ohci_remove(struct platform_device *pdev)
  {
 -      struct exynos_ohci_hcd *exynos_ohci = platform_get_drvdata(pdev);
 -      struct usb_hcd *hcd = exynos_ohci->hcd;
 +      struct usb_hcd *hcd = platform_get_drvdata(pdev);
 +      struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
  
        usb_remove_hcd(hcd);
  
        if (exynos_ohci->otg)
 -              exynos_ohci->otg->set_host(exynos_ohci->otg,
 -                                      &exynos_ohci->hcd->self);
 +              exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
  
 -      exynos_ohci_phy_disable(exynos_ohci);
 +      exynos_ohci_phy_disable(pdev);
  
        clk_disable_unprepare(exynos_ohci->clk);
  
  
  static void exynos_ohci_shutdown(struct platform_device *pdev)
  {
 -      struct exynos_ohci_hcd *exynos_ohci = platform_get_drvdata(pdev);
 -      struct usb_hcd *hcd = exynos_ohci->hcd;
 +      struct usb_hcd *hcd = platform_get_drvdata(pdev);
  
        if (hcd->driver->shutdown)
                hcd->driver->shutdown(hcd);
  #ifdef CONFIG_PM
  static int exynos_ohci_suspend(struct device *dev)
  {
 -      struct exynos_ohci_hcd *exynos_ohci = dev_get_drvdata(dev);
 -      struct usb_hcd *hcd = exynos_ohci->hcd;
 +      struct usb_hcd *hcd = dev_get_drvdata(dev);
 +      struct exynos_ohci_hcd *exynos_ohci = to_exynos_ohci(hcd);
        struct ohci_hcd *ohci = hcd_to_ohci(hcd);
 +      struct platform_device *pdev = to_platform_device(dev);
        unsigned long flags;
        int rc = 0;
  
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
  
        if (exynos_ohci->otg)
 -              exynos_ohci->otg->set_host(exynos_ohci->otg,
 -                                      &exynos_ohci->hcd->self);
 +              exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
  
 -      exynos_ohci_phy_disable(exynos_ohci);
 +      exynos_ohci_phy_disable(pdev);
  
        clk_disable_unprepare(exynos_ohci->clk);
  
@@@ -225,16 -285,16 +224,16 @@@ fail
  
  static int exynos_ohci_resume(struct device *dev)
  {
 -      struct exynos_ohci_hcd *exynos_ohci = dev_get_drvdata(dev);
 -      struct usb_hcd *hcd = exynos_ohci->hcd;
 +      struct usb_hcd *hcd                     = dev_get_drvdata(dev);
 +      struct exynos_ohci_hcd *exynos_ohci     = to_exynos_ohci(hcd);
 +      struct platform_device *pdev            = to_platform_device(dev);
  
        clk_prepare_enable(exynos_ohci->clk);
  
        if (exynos_ohci->otg)
 -              exynos_ohci->otg->set_host(exynos_ohci->otg,
 -                                      &exynos_ohci->hcd->self);
 +              exynos_ohci->otg->set_host(exynos_ohci->otg, &hcd->self);
  
 -      exynos_ohci_phy_enable(exynos_ohci);
 +      exynos_ohci_phy_enable(pdev);
  
        ohci_resume(hcd, false);
  
  #define exynos_ohci_resume    NULL
  #endif
  
 +static const struct ohci_driver_overrides exynos_overrides __initconst = {
 +      .extra_priv_size =      sizeof(struct exynos_ohci_hcd),
 +};
 +
  static const struct dev_pm_ops exynos_ohci_pm_ops = {
        .suspend        = exynos_ohci_suspend,
        .resume         = exynos_ohci_resume,
@@@ -274,23 -330,6 +273,23 @@@ static struct platform_driver exynos_oh
                .of_match_table = of_match_ptr(exynos_ohci_match),
        }
  };
 +static int __init ohci_exynos_init(void)
 +{
 +      if (usb_disabled())
 +              return -ENODEV;
 +
 +      pr_info("%s: " DRIVER_DESC "\n", hcd_name);
 +      ohci_init_driver(&exynos_ohci_hc_driver, &exynos_overrides);
 +      return platform_driver_register(&exynos_ohci_driver);
 +}
 +module_init(ohci_exynos_init);
 +
 +static void __exit ohci_exynos_cleanup(void)
 +{
 +      platform_driver_unregister(&exynos_ohci_driver);
 +}
 +module_exit(ohci_exynos_cleanup);
  
  MODULE_ALIAS("platform:exynos-ohci");
  MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
 +MODULE_LICENSE("GPL v2");
   * or implied.
   */
  #include <linux/clk.h>
 -#include <linux/platform_device.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/io.h>
  #include <linux/i2c.h>
 +#include <linux/kernel.h>
 +#include <linux/module.h>
  #include <linux/of.h>
 +#include <linux/platform_device.h>
  #include <linux/usb/isp1301.h>
 +#include <linux/usb.h>
 +#include <linux/usb/hcd.h>
 +
 +#include "ohci.h"
 +
  
  #include <mach/hardware.h>
  #include <asm/mach-types.h>
  #define start_int_umask(irq)
  #endif
  
 +#define DRIVER_DESC "OHCI NXP driver"
 +
 +static const char hcd_name[] = "ohci-nxp";
 +static struct hc_driver __read_mostly ohci_nxp_hc_driver;
 +
  static struct i2c_client *isp1301_i2c_client;
  
  extern int usb_disabled(void);
@@@ -146,14 -132,14 +146,14 @@@ static inline void isp1301_vbus_off(voi
                OTG1_VBUS_DRV);
  }
  
 -static void nxp_start_hc(void)
 +static void ohci_nxp_start_hc(void)
  {
        unsigned long tmp = __raw_readl(USB_OTG_STAT_CONTROL) | HOST_EN;
        __raw_writel(tmp, USB_OTG_STAT_CONTROL);
        isp1301_vbus_on();
  }
  
 -static void nxp_stop_hc(void)
 +static void ohci_nxp_stop_hc(void)
  {
        unsigned long tmp;
        isp1301_vbus_off();
        __raw_writel(tmp, USB_OTG_STAT_CONTROL);
  }
  
 -static int ohci_nxp_start(struct usb_hcd *hcd)
 -{
 -      struct ohci_hcd *ohci = hcd_to_ohci(hcd);
 -      int ret;
 -
 -      if ((ret = ohci_init(ohci)) < 0)
 -              return ret;
 -
 -      if ((ret = ohci_run(ohci)) < 0) {
 -              dev_err(hcd->self.controller, "can't start\n");
 -              ohci_stop(hcd);
 -              return ret;
 -      }
 -      return 0;
 -}
 -
 -static const struct hc_driver ohci_nxp_hc_driver = {
 -      .description = hcd_name,
 -      .product_desc =         "nxp OHCI",
 -
 -      /*
 -       * generic hardware linkage
 -       */
 -      .irq = ohci_irq,
 -      .flags = HCD_USB11 | HCD_MEMORY,
 -
 -      .hcd_priv_size =        sizeof(struct ohci_hcd),
 -      /*
 -       * basic lifecycle operations
 -       */
 -      .start = ohci_nxp_start,
 -      .stop = ohci_stop,
 -      .shutdown = ohci_shutdown,
 -
 -      /*
 -       * managing i/o requests and associated device resources
 -       */
 -      .urb_enqueue = ohci_urb_enqueue,
 -      .urb_dequeue = ohci_urb_dequeue,
 -      .endpoint_disable = ohci_endpoint_disable,
 -
 -      /*
 -       * scheduling support
 -       */
 -      .get_frame_number = ohci_get_frame,
 -
 -      /*
 -       * root hub support
 -       */
 -      .hub_status_data = ohci_hub_status_data,
 -      .hub_control = ohci_hub_control,
 -#ifdef        CONFIG_PM
 -      .bus_suspend = ohci_bus_suspend,
 -      .bus_resume = ohci_bus_resume,
 -#endif
 -      .start_port_reset = ohci_start_port_reset,
 -};
 -
 -static int usb_hcd_nxp_probe(struct platform_device *pdev)
 +static int ohci_hcd_nxp_probe(struct platform_device *pdev)
  {
        struct usb_hcd *hcd = 0;
 -      struct ohci_hcd *ohci;
        const struct hc_driver *driver = &ohci_nxp_hc_driver;
        struct resource *res;
        int ret = 0, irq;
                return -EPROBE_DEFER;
        }
  
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-       pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto fail_disable;
  
        dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (nxp)\n", hcd_name);
        if (usb_disabled()) {
                goto fail_resource;
        }
  
 -      nxp_start_hc();
 +      ohci_nxp_start_hc();
        platform_set_drvdata(pdev, hcd);
 -      ohci = hcd_to_ohci(hcd);
 -      ohci_hcd_init(ohci);
  
        dev_info(&pdev->dev, "at 0x%p, irq %d\n", hcd->regs, hcd->irq);
        ret = usb_add_hcd(hcd, irq, 0);
        if (ret == 0)
                return ret;
  
 -      nxp_stop_hc();
 +      ohci_nxp_stop_hc();
  fail_resource:
        usb_put_hcd(hcd);
  fail_hcd:
@@@ -298,12 -346,12 +299,12 @@@ fail_disable
        return ret;
  }
  
 -static int usb_hcd_nxp_remove(struct platform_device *pdev)
 +static int ohci_hcd_nxp_remove(struct platform_device *pdev)
  {
        struct usb_hcd *hcd = platform_get_drvdata(pdev);
  
        usb_remove_hcd(hcd);
 -      nxp_stop_hc();
 +      ohci_nxp_stop_hc();
        usb_put_hcd(hcd);
        clk_disable(usb_pll_clk);
        clk_put(usb_pll_clk);
  MODULE_ALIAS("platform:usb-ohci");
  
  #ifdef CONFIG_OF
 -static const struct of_device_id usb_hcd_nxp_match[] = {
 +static const struct of_device_id ohci_hcd_nxp_match[] = {
        { .compatible = "nxp,ohci-nxp" },
        {},
  };
 -MODULE_DEVICE_TABLE(of, usb_hcd_nxp_match);
 +MODULE_DEVICE_TABLE(of, ohci_hcd_nxp_match);
  #endif
  
 -static struct platform_driver usb_hcd_nxp_driver = {
 +static struct platform_driver ohci_hcd_nxp_driver = {
        .driver = {
                .name = "usb-ohci",
                .owner  = THIS_MODULE,
 -              .of_match_table = of_match_ptr(usb_hcd_nxp_match),
 +              .of_match_table = of_match_ptr(ohci_hcd_nxp_match),
        },
 -      .probe = usb_hcd_nxp_probe,
 -      .remove = usb_hcd_nxp_remove,
 +      .probe = ohci_hcd_nxp_probe,
 +      .remove = ohci_hcd_nxp_remove,
  };
  
 +static int __init ohci_nxp_init(void)
 +{
 +      if (usb_disabled())
 +              return -ENODEV;
 +
 +      pr_info("%s: " DRIVER_DESC "\n", hcd_name);
 +
 +      ohci_init_driver(&ohci_nxp_hc_driver, NULL);
 +      return platform_driver_register(&ohci_hcd_nxp_driver);
 +}
 +module_init(ohci_nxp_init);
 +
 +static void __exit ohci_nxp_cleanup(void)
 +{
 +      platform_driver_unregister(&ohci_hcd_nxp_driver);
 +}
 +module_exit(ohci_nxp_cleanup);
 +
 +MODULE_DESCRIPTION(DRIVER_DESC);
 +MODULE_LICENSE("GPL v2");
   *    - add kernel-doc
   */
  
 +#include <linux/dma-mapping.h>
 +#include <linux/kernel.h>
 +#include <linux/module.h>
 +#include <linux/of.h>
 +#include <linux/usb/otg.h>
  #include <linux/platform_device.h>
  #include <linux/pm_runtime.h>
 -#include <linux/of.h>
 -#include <linux/dma-mapping.h>
 -
 -/*-------------------------------------------------------------------------*/
 -
 -static int ohci_omap3_init(struct usb_hcd *hcd)
 -{
 -      dev_dbg(hcd->self.controller, "starting OHCI controller\n");
 -
 -      return ohci_init(hcd_to_ohci(hcd));
 -}
 -
 -/*-------------------------------------------------------------------------*/
 -
 -static int ohci_omap3_start(struct usb_hcd *hcd)
 -{
 -      struct ohci_hcd *ohci = hcd_to_ohci(hcd);
 -      int ret;
 -
 -      /*
 -       * RemoteWakeupConnected has to be set explicitly before
 -       * calling ohci_run. The reset value of RWC is 0.
 -       */
 -      ohci->hc_control = OHCI_CTRL_RWC;
 -      writel(OHCI_CTRL_RWC, &ohci->regs->control);
 -
 -      ret = ohci_run(ohci);
 -
 -      if (ret < 0) {
 -              dev_err(hcd->self.controller, "can't start\n");
 -              ohci_stop(hcd);
 -      }
 +#include <linux/usb.h>
 +#include <linux/usb/hcd.h>
  
 -      return ret;
 -}
 +#include "ohci.h"
  
 -/*-------------------------------------------------------------------------*/
 +#define DRIVER_DESC "OHCI OMAP3 driver"
  
 -static const struct hc_driver ohci_omap3_hc_driver = {
 -      .description =          hcd_name,
 -      .product_desc =         "OMAP3 OHCI Host Controller",
 -      .hcd_priv_size =        sizeof(struct ohci_hcd),
 -
 -      /*
 -       * generic hardware linkage
 -       */
 -      .irq =                  ohci_irq,
 -      .flags =                HCD_USB11 | HCD_MEMORY,
 -
 -      /*
 -       * basic lifecycle operations
 -       */
 -      .reset =                ohci_omap3_init,
 -      .start =                ohci_omap3_start,
 -      .stop =                 ohci_stop,
 -      .shutdown =             ohci_shutdown,
 -
 -      /*
 -       * managing i/o requests and associated device resources
 -       */
 -      .urb_enqueue =          ohci_urb_enqueue,
 -      .urb_dequeue =          ohci_urb_dequeue,
 -      .endpoint_disable =     ohci_endpoint_disable,
 -
 -      /*
 -       * scheduling support
 -       */
 -      .get_frame_number =     ohci_get_frame,
 -
 -      /*
 -       * root hub support
 -       */
 -      .hub_status_data =      ohci_hub_status_data,
 -      .hub_control =          ohci_hub_control,
 -#ifdef        CONFIG_PM
 -      .bus_suspend =          ohci_bus_suspend,
 -      .bus_resume =           ohci_bus_resume,
 -#endif
 -      .start_port_reset =     ohci_start_port_reset,
 -};
 -
 -/*-------------------------------------------------------------------------*/
 +static const char hcd_name[] = "ohci-omap3";
 +static struct hc_driver __read_mostly ohci_omap3_hc_driver;
  
  /*
   * configure so an HC device and id are always provided
  static int ohci_hcd_omap3_probe(struct platform_device *pdev)
  {
        struct device           *dev = &pdev->dev;
 +      struct ohci_hcd         *ohci;
        struct usb_hcd          *hcd = NULL;
        void __iomem            *regs = NULL;
        struct resource         *res;
-       int                     ret = -ENODEV;
+       int                     ret;
        int                     irq;
  
        if (usb_disabled())
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!dev->dma_mask)
-               dev->dma_mask = &dev->coherent_dma_mask;
-       if (!dev->coherent_dma_mask)
-               dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto err_io;
  
+       ret = -ENODEV;
        hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev,
                        dev_name(dev));
        if (!hcd) {
        pm_runtime_enable(dev);
        pm_runtime_get_sync(dev);
  
 -      ohci_hcd_init(hcd_to_ohci(hcd));
 +      ohci = hcd_to_ohci(hcd);
 +      /*
 +       * RemoteWakeupConnected has to be set explicitly before
 +       * calling ohci_run. The reset value of RWC is 0.
 +       */
 +      ohci->hc_control = OHCI_CTRL_RWC;
  
        ret = usb_add_hcd(hcd, irq, 0);
        if (ret) {
@@@ -186,25 -248,5 +186,25 @@@ static struct platform_driver ohci_hcd_
        },
  };
  
 +static int __init ohci_omap3_init(void)
 +{
 +      if (usb_disabled())
 +              return -ENODEV;
 +
 +      pr_info("%s: " DRIVER_DESC "\n", hcd_name);
 +
 +      ohci_init_driver(&ohci_omap3_hc_driver, NULL);
 +      return platform_driver_register(&ohci_hcd_omap3_driver);
 +}
 +module_init(ohci_omap3_init);
 +
 +static void __exit ohci_omap3_cleanup(void)
 +{
 +      platform_driver_unregister(&ohci_hcd_omap3_driver);
 +}
 +module_exit(ohci_omap3_cleanup);
 +
 +MODULE_DESCRIPTION(DRIVER_DESC);
  MODULE_ALIAS("platform:ohci-omap3");
  MODULE_AUTHOR("Anand Gadiyar <gadiyar@ti.com>");
 +MODULE_LICENSE("GPL");
   * This file is licenced under the GPL.
   */
  
 -#include <linux/device.h>
 -#include <linux/signal.h>
 -#include <linux/platform_device.h>
  #include <linux/clk.h>
 +#include <linux/device.h>
 +#include <linux/io.h>
 +#include <linux/kernel.h>
 +#include <linux/module.h>
  #include <linux/of_platform.h>
  #include <linux/of_gpio.h>
 -#include <mach/hardware.h>
  #include <linux/platform_data/usb-ohci-pxa27x.h>
  #include <linux/platform_data/usb-pxa3xx-ulpi.h>
 +#include <linux/platform_device.h>
 +#include <linux/signal.h>
 +#include <linux/usb.h>
 +#include <linux/usb/hcd.h>
 +#include <linux/usb/otg.h>
 +
 +#include <mach/hardware.h>
 +
 +#include "ohci.h"
 +
 +#define DRIVER_DESC "OHCI PXA27x/PXA3x driver"
  
  /*
   * UHC: USB Host Controller (OHCI-like) register definitions
  
  #define PXA_UHC_MAX_PORTNUM    3
  
 -struct pxa27x_ohci {
 -      /* must be 1st member here for hcd_to_ohci() to work */
 -      struct ohci_hcd ohci;
 +static const char hcd_name[] = "ohci-pxa27x";
  
 -      struct device   *dev;
 +static struct hc_driver __read_mostly ohci_pxa27x_hc_driver;
 +
 +struct pxa27x_ohci {
        struct clk      *clk;
        void __iomem    *mmio_base;
  };
  
 -#define to_pxa27x_ohci(hcd)   (struct pxa27x_ohci *)hcd_to_ohci(hcd)
 +#define to_pxa27x_ohci(hcd)   (struct pxa27x_ohci *)(hcd_to_ohci(hcd)->priv)
  
  /*
    PMM_NPS_MODE -- PMM Non-power switching mode
    PMM_PERPORT_MODE -- PMM per port switching mode
        Ports are powered individually.
   */
 -static int pxa27x_ohci_select_pmm(struct pxa27x_ohci *ohci, int mode)
 +static int pxa27x_ohci_select_pmm(struct pxa27x_ohci *pxa_ohci, int mode)
  {
 -      uint32_t uhcrhda = __raw_readl(ohci->mmio_base + UHCRHDA);
 -      uint32_t uhcrhdb = __raw_readl(ohci->mmio_base + UHCRHDB);
 +      uint32_t uhcrhda = __raw_readl(pxa_ohci->mmio_base + UHCRHDA);
 +      uint32_t uhcrhdb = __raw_readl(pxa_ohci->mmio_base + UHCRHDB);
  
        switch (mode) {
        case PMM_NPS_MODE:
                uhcrhda |= RH_A_NPS;
        }
  
 -      __raw_writel(uhcrhda, ohci->mmio_base + UHCRHDA);
 -      __raw_writel(uhcrhdb, ohci->mmio_base + UHCRHDB);
 +      __raw_writel(uhcrhda, pxa_ohci->mmio_base + UHCRHDA);
 +      __raw_writel(uhcrhdb, pxa_ohci->mmio_base + UHCRHDB);
        return 0;
  }
  
 -extern int usb_disabled(void);
 -
  /*-------------------------------------------------------------------------*/
  
 -static inline void pxa27x_setup_hc(struct pxa27x_ohci *ohci,
 +static inline void pxa27x_setup_hc(struct pxa27x_ohci *pxa_ohci,
                                   struct pxaohci_platform_data *inf)
  {
 -      uint32_t uhchr = __raw_readl(ohci->mmio_base + UHCHR);
 -      uint32_t uhcrhda = __raw_readl(ohci->mmio_base + UHCRHDA);
 +      uint32_t uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR);
 +      uint32_t uhcrhda = __raw_readl(pxa_ohci->mmio_base + UHCRHDA);
  
        if (inf->flags & ENABLE_PORT1)
                uhchr &= ~UHCHR_SSEP1;
                uhcrhda |= UHCRHDA_POTPGT(inf->power_on_delay / 2);
        }
  
 -      __raw_writel(uhchr, ohci->mmio_base + UHCHR);
 -      __raw_writel(uhcrhda, ohci->mmio_base + UHCRHDA);
 +      __raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
 +      __raw_writel(uhcrhda, pxa_ohci->mmio_base + UHCRHDA);
  }
  
 -static inline void pxa27x_reset_hc(struct pxa27x_ohci *ohci)
 +static inline void pxa27x_reset_hc(struct pxa27x_ohci *pxa_ohci)
  {
 -      uint32_t uhchr = __raw_readl(ohci->mmio_base + UHCHR);
 +      uint32_t uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR);
  
 -      __raw_writel(uhchr | UHCHR_FHR, ohci->mmio_base + UHCHR);
 +      __raw_writel(uhchr | UHCHR_FHR, pxa_ohci->mmio_base + UHCHR);
        udelay(11);
 -      __raw_writel(uhchr & ~UHCHR_FHR, ohci->mmio_base + UHCHR);
 +      __raw_writel(uhchr & ~UHCHR_FHR, pxa_ohci->mmio_base + UHCHR);
  }
  
  #ifdef CONFIG_PXA27x
@@@ -222,26 -213,25 +222,26 @@@ extern void pxa27x_clear_otgph(void)
  #define pxa27x_clear_otgph()  do {} while (0)
  #endif
  
 -static int pxa27x_start_hc(struct pxa27x_ohci *ohci, struct device *dev)
 +static int pxa27x_start_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
  {
        int retval = 0;
        struct pxaohci_platform_data *inf;
        uint32_t uhchr;
 +      struct usb_hcd *hcd = dev_get_drvdata(dev);
  
        inf = dev_get_platdata(dev);
  
 -      clk_prepare_enable(ohci->clk);
 +      clk_prepare_enable(pxa_ohci->clk);
  
 -      pxa27x_reset_hc(ohci);
 +      pxa27x_reset_hc(pxa_ohci);
  
 -      uhchr = __raw_readl(ohci->mmio_base + UHCHR) | UHCHR_FSBIR;
 -      __raw_writel(uhchr, ohci->mmio_base + UHCHR);
 +      uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR) | UHCHR_FSBIR;
 +      __raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
  
 -      while (__raw_readl(ohci->mmio_base + UHCHR) & UHCHR_FSBIR)
 +      while (__raw_readl(pxa_ohci->mmio_base + UHCHR) & UHCHR_FSBIR)
                cpu_relax();
  
 -      pxa27x_setup_hc(ohci, inf);
 +      pxa27x_setup_hc(pxa_ohci, inf);
  
        if (inf->init)
                retval = inf->init(dev);
                return retval;
  
        if (cpu_is_pxa3xx())
 -              pxa3xx_u2d_start_hc(&ohci_to_hcd(&ohci->ohci)->self);
 +              pxa3xx_u2d_start_hc(&hcd->self);
  
 -      uhchr = __raw_readl(ohci->mmio_base + UHCHR) & ~UHCHR_SSE;
 -      __raw_writel(uhchr, ohci->mmio_base + UHCHR);
 -      __raw_writel(UHCHIE_UPRIE | UHCHIE_RWIE, ohci->mmio_base + UHCHIE);
 +      uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR) & ~UHCHR_SSE;
 +      __raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
 +      __raw_writel(UHCHIE_UPRIE | UHCHIE_RWIE, pxa_ohci->mmio_base + UHCHIE);
  
        /* Clear any OTG Pin Hold */
        pxa27x_clear_otgph();
        return 0;
  }
  
 -static void pxa27x_stop_hc(struct pxa27x_ohci *ohci, struct device *dev)
 +static void pxa27x_stop_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
  {
        struct pxaohci_platform_data *inf;
 +      struct usb_hcd *hcd = dev_get_drvdata(dev);
        uint32_t uhccoms;
  
        inf = dev_get_platdata(dev);
  
        if (cpu_is_pxa3xx())
 -              pxa3xx_u2d_stop_hc(&ohci_to_hcd(&ohci->ohci)->self);
 +              pxa3xx_u2d_stop_hc(&hcd->self);
  
        if (inf->exit)
                inf->exit(dev);
  
 -      pxa27x_reset_hc(ohci);
 +      pxa27x_reset_hc(pxa_ohci);
  
        /* Host Controller Reset */
 -      uhccoms = __raw_readl(ohci->mmio_base + UHCCOMS) | 0x01;
 -      __raw_writel(uhccoms, ohci->mmio_base + UHCCOMS);
 +      uhccoms = __raw_readl(pxa_ohci->mmio_base + UHCCOMS) | 0x01;
 +      __raw_writel(uhccoms, pxa_ohci->mmio_base + UHCCOMS);
        udelay(10);
  
 -      clk_disable_unprepare(ohci->clk);
 +      clk_disable_unprepare(pxa_ohci->clk);
  }
  
  #ifdef CONFIG_OF
@@@ -298,6 -287,7 +298,7 @@@ static int ohci_pxa_of_init(struct plat
        struct device_node *np = pdev->dev.of_node;
        struct pxaohci_platform_data *pdata;
        u32 tmp;
+       int ret;
  
        if (!np)
                return 0;
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
  
        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
@@@ -367,8 -356,7 +367,8 @@@ int usb_hcd_pxa27x_probe (const struct 
        int retval, irq;
        struct usb_hcd *hcd;
        struct pxaohci_platform_data *inf;
 -      struct pxa27x_ohci *ohci;
 +      struct pxa27x_ohci *pxa_ohci;
 +      struct ohci_hcd *ohci;
        struct resource *r;
        struct clk *usb_clk;
  
        }
  
        /* initialize "struct pxa27x_ohci" */
 -      ohci = (struct pxa27x_ohci *)hcd_to_ohci(hcd);
 -      ohci->dev = &pdev->dev;
 -      ohci->clk = usb_clk;
 -      ohci->mmio_base = (void __iomem *)hcd->regs;
 +      pxa_ohci = to_pxa27x_ohci(hcd);
 +      pxa_ohci->clk = usb_clk;
 +      pxa_ohci->mmio_base = (void __iomem *)hcd->regs;
  
 -      if ((retval = pxa27x_start_hc(ohci, &pdev->dev)) < 0) {
 +      retval = pxa27x_start_hc(pxa_ohci, &pdev->dev);
 +      if (retval < 0) {
                pr_debug("pxa27x_start_hc failed");
                goto err3;
        }
  
        /* Select Power Management Mode */
 -      pxa27x_ohci_select_pmm(ohci, inf->port_mode);
 +      pxa27x_ohci_select_pmm(pxa_ohci, inf->port_mode);
  
        if (inf->power_budget)
                hcd->power_budget = inf->power_budget;
  
 -      ohci_hcd_init(hcd_to_ohci(hcd));
 +      /* The value of NDP in roothub_a is incorrect on this hardware */
 +      ohci = hcd_to_ohci(hcd);
 +      ohci->num_ports = 3;
  
        retval = usb_add_hcd(hcd, irq, 0);
        if (retval == 0)
                return retval;
  
 -      pxa27x_stop_hc(ohci, &pdev->dev);
 +      pxa27x_stop_hc(pxa_ohci, &pdev->dev);
   err3:
        iounmap(hcd->regs);
   err2:
   */
  void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *pdev)
  {
 -      struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
 +      struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
  
        usb_remove_hcd(hcd);
 -      pxa27x_stop_hc(ohci, &pdev->dev);
 +      pxa27x_stop_hc(pxa_ohci, &pdev->dev);
        iounmap(hcd->regs);
        release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
 +      clk_put(pxa_ohci->clk);
        usb_put_hcd(hcd);
 -      clk_put(ohci->clk);
 -}
 -
 -/*-------------------------------------------------------------------------*/
 -
 -static int
 -ohci_pxa27x_start (struct usb_hcd *hcd)
 -{
 -      struct ohci_hcd *ohci = hcd_to_ohci (hcd);
 -      int             ret;
 -
 -      ohci_dbg (ohci, "ohci_pxa27x_start, ohci:%p", ohci);
 -
 -      /* The value of NDP in roothub_a is incorrect on this hardware */
 -      ohci->num_ports = 3;
 -
 -      if ((ret = ohci_init(ohci)) < 0)
 -              return ret;
 -
 -      if ((ret = ohci_run (ohci)) < 0) {
 -              dev_err(hcd->self.controller, "can't start %s",
 -                      hcd->self.bus_name);
 -              ohci_stop (hcd);
 -              return ret;
 -      }
 -
 -      return 0;
  }
  
  /*-------------------------------------------------------------------------*/
  
 -static const struct hc_driver ohci_pxa27x_hc_driver = {
 -      .description =          hcd_name,
 -      .product_desc =         "PXA27x OHCI",
 -      .hcd_priv_size =        sizeof(struct pxa27x_ohci),
 -
 -      /*
 -       * generic hardware linkage
 -       */
 -      .irq =                  ohci_irq,
 -      .flags =                HCD_USB11 | HCD_MEMORY,
 -
 -      /*
 -       * basic lifecycle operations
 -       */
 -      .start =                ohci_pxa27x_start,
 -      .stop =                 ohci_stop,
 -      .shutdown =             ohci_shutdown,
 -
 -      /*
 -       * managing i/o requests and associated device resources
 -       */
 -      .urb_enqueue =          ohci_urb_enqueue,
 -      .urb_dequeue =          ohci_urb_dequeue,
 -      .endpoint_disable =     ohci_endpoint_disable,
 -
 -      /*
 -       * scheduling support
 -       */
 -      .get_frame_number =     ohci_get_frame,
 -
 -      /*
 -       * root hub support
 -       */
 -      .hub_status_data =      ohci_hub_status_data,
 -      .hub_control =          ohci_hub_control,
 -#ifdef  CONFIG_PM
 -      .bus_suspend =          ohci_bus_suspend,
 -      .bus_resume =           ohci_bus_resume,
 -#endif
 -      .start_port_reset =     ohci_start_port_reset,
 -};
 -
 -/*-------------------------------------------------------------------------*/
 -
  static int ohci_hcd_pxa27x_drv_probe(struct platform_device *pdev)
  {
        pr_debug ("In ohci_hcd_pxa27x_drv_probe");
@@@ -507,42 -563,32 +507,42 @@@ static int ohci_hcd_pxa27x_drv_remove(s
  static int ohci_hcd_pxa27x_drv_suspend(struct device *dev)
  {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
 -      struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
 +      struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
 +      struct ohci_hcd *ohci = hcd_to_ohci(hcd);
 +      bool do_wakeup = device_may_wakeup(dev);
 +      int ret;
  
 -      if (time_before(jiffies, ohci->ohci.next_statechange))
 +
 +      if (time_before(jiffies, ohci->next_statechange))
                msleep(5);
 -      ohci->ohci.next_statechange = jiffies;
 +      ohci->next_statechange = jiffies;
  
 -      pxa27x_stop_hc(ohci, dev);
 -      return 0;
 +      ret = ohci_suspend(hcd, do_wakeup);
 +      if (ret)
 +              return ret;
 +
 +      pxa27x_stop_hc(pxa_ohci, dev);
 +      return ret;
  }
  
  static int ohci_hcd_pxa27x_drv_resume(struct device *dev)
  {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
 -      struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
 +      struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
        struct pxaohci_platform_data *inf = dev_get_platdata(dev);
 +      struct ohci_hcd *ohci = hcd_to_ohci(hcd);
        int status;
  
 -      if (time_before(jiffies, ohci->ohci.next_statechange))
 +      if (time_before(jiffies, ohci->next_statechange))
                msleep(5);
 -      ohci->ohci.next_statechange = jiffies;
 +      ohci->next_statechange = jiffies;
  
 -      if ((status = pxa27x_start_hc(ohci, dev)) < 0)
 +      status = pxa27x_start_hc(pxa_ohci, dev);
 +      if (status < 0)
                return status;
  
        /* Select Power Management Mode */
 -      pxa27x_ohci_select_pmm(ohci, inf->port_mode);
 +      pxa27x_ohci_select_pmm(pxa_ohci, inf->port_mode);
  
        ohci_resume(hcd, false);
        return 0;
@@@ -554,6 -600,9 +554,6 @@@ static const struct dev_pm_ops ohci_hcd
  };
  #endif
  
 -/* work with hotplug and coldplug */
 -MODULE_ALIAS("platform:pxa27x-ohci");
 -
  static struct platform_driver ohci_hcd_pxa27x_driver = {
        .probe          = ohci_hcd_pxa27x_drv_probe,
        .remove         = ohci_hcd_pxa27x_drv_remove,
        },
  };
  
 +static const struct ohci_driver_overrides pxa27x_overrides __initconst = {
 +      .extra_priv_size =      sizeof(struct pxa27x_ohci),
 +};
 +
 +static int __init ohci_pxa27x_init(void)
 +{
 +      if (usb_disabled())
 +              return -ENODEV;
 +
 +      pr_info("%s: " DRIVER_DESC "\n", hcd_name);
 +      ohci_init_driver(&ohci_pxa27x_hc_driver, &pxa27x_overrides);
 +      return platform_driver_register(&ohci_hcd_pxa27x_driver);
 +}
 +module_init(ohci_pxa27x_init);
 +
 +static void __exit ohci_pxa27x_cleanup(void)
 +{
 +      platform_driver_unregister(&ohci_hcd_pxa27x_driver);
 +}
 +module_exit(ohci_pxa27x_cleanup);
 +
 +MODULE_DESCRIPTION(DRIVER_DESC);
 +MODULE_LICENSE("GPL");
 +MODULE_ALIAS("platform:pxa27x-ohci");
  * warranty of any kind, whether express or implied.
  */
  
 -#include <linux/signal.h>
 -#include <linux/platform_device.h>
  #include <linux/clk.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/io.h>
 +#include <linux/kernel.h>
 +#include <linux/module.h>
  #include <linux/of.h>
 +#include <linux/platform_device.h>
 +#include <linux/signal.h>
 +#include <linux/usb.h>
 +#include <linux/usb/hcd.h>
 +
 +#include "ohci.h"
  
 +#define DRIVER_DESC "OHCI SPEAr driver"
 +
 +static const char hcd_name[] = "SPEAr-ohci";
  struct spear_ohci {
 -      struct ohci_hcd ohci;
        struct clk *clk;
  };
  
 -#define to_spear_ohci(hcd)    (struct spear_ohci *)hcd_to_ohci(hcd)
 -
 -static void spear_start_ohci(struct spear_ohci *ohci)
 -{
 -      clk_prepare_enable(ohci->clk);
 -}
 -
 -static void spear_stop_ohci(struct spear_ohci *ohci)
 -{
 -      clk_disable_unprepare(ohci->clk);
 -}
 -
 -static int ohci_spear_start(struct usb_hcd *hcd)
 -{
 -      struct ohci_hcd *ohci = hcd_to_ohci(hcd);
 -      int ret;
 -
 -      ret = ohci_init(ohci);
 -      if (ret < 0)
 -              return ret;
 -      ohci->regs = hcd->regs;
 -
 -      ret = ohci_run(ohci);
 -      if (ret < 0) {
 -              dev_err(hcd->self.controller, "can't start\n");
 -              ohci_stop(hcd);
 -              return ret;
 -      }
 -
 -      create_debug_files(ohci);
 -
 -#ifdef DEBUG
 -      ohci_dump(ohci, 1);
 -#endif
 -      return 0;
 -}
 -
 -static const struct hc_driver ohci_spear_hc_driver = {
 -      .description            = hcd_name,
 -      .product_desc           = "SPEAr OHCI",
 -      .hcd_priv_size          = sizeof(struct spear_ohci),
 -
 -      /* generic hardware linkage */
 -      .irq                    = ohci_irq,
 -      .flags                  = HCD_USB11 | HCD_MEMORY,
 -
 -      /* basic lifecycle operations */
 -      .start                  = ohci_spear_start,
 -      .stop                   = ohci_stop,
 -      .shutdown               = ohci_shutdown,
 -#ifdef        CONFIG_PM
 -      .bus_suspend            = ohci_bus_suspend,
 -      .bus_resume             = ohci_bus_resume,
 -#endif
 -
 -      /* managing i/o requests and associated device resources */
 -      .urb_enqueue            = ohci_urb_enqueue,
 -      .urb_dequeue            = ohci_urb_dequeue,
 -      .endpoint_disable       = ohci_endpoint_disable,
 -
 -      /* scheduling support */
 -      .get_frame_number       = ohci_get_frame,
 +#define to_spear_ohci(hcd)     (struct spear_ohci *)(hcd_to_ohci(hcd)->priv)
  
 -      /* root hub support */
 -      .hub_status_data        = ohci_hub_status_data,
 -      .hub_control            = ohci_hub_control,
 -
 -      .start_port_reset       = ohci_start_port_reset,
 -};
 +static struct hc_driver __read_mostly ohci_spear_hc_driver;
  
  static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
  {
        const struct hc_driver *driver = &ohci_spear_hc_driver;
 +      struct ohci_hcd *ohci;
        struct usb_hcd *hcd = NULL;
        struct clk *usbh_clk;
 -      struct spear_ohci *ohci_p;
 +      struct spear_ohci *sohci_p;
        struct resource *res;
        int retval, irq;
  
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (retval)
+               goto fail;
  
        usbh_clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(usbh_clk)) {
                goto err_put_hcd;
        }
  
 -      ohci_p = (struct spear_ohci *)hcd_to_ohci(hcd);
 -      ohci_p->clk = usbh_clk;
 -      spear_start_ohci(ohci_p);
 -      ohci_hcd_init(hcd_to_ohci(hcd));
 +      sohci_p = to_spear_ohci(hcd);
 +      sohci_p->clk = usbh_clk;
 +
 +      clk_prepare_enable(sohci_p->clk);
 +
 +      ohci = hcd_to_ohci(hcd);
  
        retval = usb_add_hcd(hcd, platform_get_irq(pdev, 0), 0);
        if (retval == 0)
                return retval;
  
 -      spear_stop_ohci(ohci_p);
 +      clk_disable_unprepare(sohci_p->clk);
  err_put_hcd:
        usb_put_hcd(hcd);
  fail:
  static int spear_ohci_hcd_drv_remove(struct platform_device *pdev)
  {
        struct usb_hcd *hcd = platform_get_drvdata(pdev);
 -      struct spear_ohci *ohci_p = to_spear_ohci(hcd);
 +      struct spear_ohci *sohci_p = to_spear_ohci(hcd);
  
        usb_remove_hcd(hcd);
 -      if (ohci_p->clk)
 -              spear_stop_ohci(ohci_p);
 +      if (sohci_p->clk)
 +              clk_disable_unprepare(sohci_p->clk);
  
        usb_put_hcd(hcd);
        return 0;
@@@ -135,14 -187,13 +134,14 @@@ static int spear_ohci_hcd_drv_suspend(s
  {
        struct usb_hcd *hcd = platform_get_drvdata(dev);
        struct ohci_hcd *ohci = hcd_to_ohci(hcd);
 -      struct spear_ohci *ohci_p = to_spear_ohci(hcd);
 +      struct spear_ohci *sohci_p = to_spear_ohci(hcd);
  
        if (time_before(jiffies, ohci->next_statechange))
                msleep(5);
        ohci->next_statechange = jiffies;
  
 -      spear_stop_ohci(ohci_p);
 +      clk_disable_unprepare(sohci_p->clk);
 +
        return 0;
  }
  
@@@ -150,13 -201,13 +149,13 @@@ static int spear_ohci_hcd_drv_resume(st
  {
        struct usb_hcd *hcd = platform_get_drvdata(dev);
        struct ohci_hcd *ohci = hcd_to_ohci(hcd);
 -      struct spear_ohci *ohci_p = to_spear_ohci(hcd);
 +      struct spear_ohci *sohci_p = to_spear_ohci(hcd);
  
        if (time_before(jiffies, ohci->next_statechange))
                msleep(5);
        ohci->next_statechange = jiffies;
  
 -      spear_start_ohci(ohci_p);
 +      clk_prepare_enable(sohci_p->clk);
        ohci_resume(hcd, false);
        return 0;
  }
@@@ -182,28 -233,4 +181,28 @@@ static struct platform_driver spear_ohc
        },
  };
  
 +static const struct ohci_driver_overrides spear_overrides __initconst = {
 +      .extra_priv_size = sizeof(struct spear_ohci),
 +};
 +static int __init ohci_spear_init(void)
 +{
 +      if (usb_disabled())
 +              return -ENODEV;
 +
 +      pr_info("%s: " DRIVER_DESC "\n", hcd_name);
 +
 +      ohci_init_driver(&ohci_spear_hc_driver, &spear_overrides);
 +      return platform_driver_register(&spear_ohci_hcd_driver);
 +}
 +module_init(ohci_spear_init);
 +
 +static void __exit ohci_spear_cleanup(void)
 +{
 +      platform_driver_unregister(&spear_ohci_hcd_driver);
 +}
 +module_exit(ohci_spear_cleanup);
 +
 +MODULE_DESCRIPTION(DRIVER_DESC);
 +MODULE_AUTHOR("Deepak Sikri");
 +MODULE_LICENSE("GPL v2");
  MODULE_ALIAS("platform:spear-ohci");
@@@ -75,10 -75,9 +75,9 @@@ static int uhci_hcd_platform_probe(stru
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
  
        hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev,
                        pdev->name);
  
        uhci->regs = hcd->regs;
  
 -      ret = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_DISABLED |
 -                                                              IRQF_SHARED);
 +      ret = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
        if (ret)
                goto err_uhci;
  
@@@ -50,7 -50,7 +50,7 @@@ static int atmel_pcm_preallocate_dma_bu
        buf->area = dma_alloc_coherent(pcm->card->dev, size,
                        &buf->addr, GFP_KERNEL);
        pr_debug("atmel-pcm: alloc dma buffer: area=%p, addr=%p, size=%zu\n",
 -                      (void *)buf->area, (void *)buf->addr, size);
 +                      (void *)buf->area, (void *)(long)buf->addr, size);
  
        if (!buf->area)
                return -ENOMEM;
@@@ -68,18 -68,15 +68,15 @@@ int atmel_pcm_mmap(struct snd_pcm_subst
  }
  EXPORT_SYMBOL_GPL(atmel_pcm_mmap);
  
- static u64 atmel_pcm_dmamask = DMA_BIT_MASK(32);
  int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
  {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
+       int ret;
  
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &atmel_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
  
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                pr_debug("atmel-pcm: allocating PCM playback DMA buffer\n");
@@@ -267,9 -267,10 +267,9 @@@ static int allocate_sram(struct snd_pcm
                return 0;
  
        ppcm->period_bytes_max = size;
 -      iram_virt = (void *)gen_pool_alloc(sram_pool, size);
 +      iram_virt = gen_pool_dma_alloc(sram_pool, size, &iram_phys);
        if (!iram_virt)
                goto exit1;
 -      iram_phys = gen_pool_virt_to_phys(sram_pool, (unsigned)iram_virt);
        iram_dma = kzalloc(sizeof(*iram_dma), GFP_KERNEL);
        if (!iram_dma)
                goto exit2;
@@@ -843,18 -844,15 +843,15 @@@ static void davinci_pcm_free(struct snd
        }
  }
  
- static u64 davinci_pcm_dmamask = DMA_BIT_MASK(32);
  static int davinci_pcm_new(struct snd_soc_pcm_runtime *rtd)
  {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
        int ret;
  
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &davinci_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
  
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = davinci_pcm_preallocate_dma_buffer(pcm,
diff --combined sound/soc/fsl/fsl_dma.c
@@@ -21,8 -21,6 +21,8 @@@
  #include <linux/interrupt.h>
  #include <linux/delay.h>
  #include <linux/gfp.h>
 +#include <linux/of_address.h>
 +#include <linux/of_irq.h>
  #include <linux/of_platform.h>
  #include <linux/list.h>
  #include <linux/slab.h>
@@@ -300,14 -298,11 +300,11 @@@ static int fsl_dma_new(struct snd_soc_p
  {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       static u64 fsl_dma_dmamask = DMA_BIT_MASK(36);
        int ret;
  
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &fsl_dma_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = fsl_dma_dmamask;
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(36));
+       if (ret)
+               return ret;
  
        /* Some codecs have separate DAIs for playback and capture, so we
         * should allocate a DMA buffer only for the streams that are valid.
@@@ -39,6 -39,8 +39,6 @@@ struct imx_pcm_runtime_data 
        unsigned int period;
        int periods;
        unsigned long offset;
 -      unsigned long last_offset;
 -      unsigned long size;
        struct hrtimer hrt;
        int poll_time_ns;
        struct snd_pcm_substream *substream;
@@@ -50,7 -52,9 +50,7 @@@ static enum hrtimer_restart snd_hrtimer
        struct imx_pcm_runtime_data *iprtd =
                container_of(hrt, struct imx_pcm_runtime_data, hrt);
        struct snd_pcm_substream *substream = iprtd->substream;
 -      struct snd_pcm_runtime *runtime = substream->runtime;
        struct pt_regs regs;
 -      unsigned long delta;
  
        if (!atomic_read(&iprtd->running))
                return HRTIMER_NORESTART;
        else
                iprtd->offset = regs.ARM_r9 & 0xffff;
  
 -      /* How much data have we transferred since the last period report? */
 -      if (iprtd->offset >= iprtd->last_offset)
 -              delta = iprtd->offset - iprtd->last_offset;
 -      else
 -              delta = runtime->buffer_size + iprtd->offset
 -                      - iprtd->last_offset;
 -
 -      /* If we've transferred at least a period then report it and
 -       * reset our poll time */
 -      if (delta >= iprtd->period) {
 -              snd_pcm_period_elapsed(substream);
 -              iprtd->last_offset = iprtd->offset;
 -      }
 +      snd_pcm_period_elapsed(substream);
  
        hrtimer_forward_now(hrt, ns_to_ktime(iprtd->poll_time_ns));
  
@@@ -79,9 -95,11 +79,9 @@@ static int snd_imx_pcm_hw_params(struc
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct imx_pcm_runtime_data *iprtd = runtime->private_data;
  
 -      iprtd->size = params_buffer_bytes(params);
        iprtd->periods = params_periods(params);
 -      iprtd->period = params_period_bytes(params) ;
 +      iprtd->period = params_period_bytes(params);
        iprtd->offset = 0;
 -      iprtd->last_offset = 0;
        iprtd->poll_time_ns = 1000000000 / params_rate(params) *
                                params_period_size(params);
        snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
@@@ -254,18 -272,16 +254,16 @@@ static int imx_pcm_preallocate_dma_buff
        return 0;
  }
  
- static u64 imx_pcm_dmamask = DMA_BIT_MASK(32);
  static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
  {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
+       int ret;
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
  
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &imx_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = imx_pcm_preallocate_dma_buffer(pcm,
                        SNDRV_PCM_STREAM_PLAYBACK);
@@@ -10,8 -10,6 +10,8 @@@
  #include <linux/of_device.h>
  #include <linux/dma-mapping.h>
  #include <linux/slab.h>
 +#include <linux/of_address.h>
 +#include <linux/of_irq.h>
  #include <linux/of_platform.h>
  
  #include <sound/soc.h>
@@@ -301,7 -299,6 +301,6 @@@ static struct snd_pcm_ops psc_dma_ops 
        .hw_params      = psc_dma_hw_params,
  };
  
- static u64 psc_dma_dmamask = DMA_BIT_MASK(32);
  static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
  {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
        struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
        size_t size = psc_dma_hardware.buffer_bytes_max;
-       int rc = 0;
+       int rc;
  
        dev_dbg(rtd->platform->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n",
                card, dai, pcm);
  
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &psc_dma_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       rc = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (rc)
+               return rc;
  
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
@@@ -29,7 -29,9 +29,7 @@@
  #define KIRKWOOD_FORMATS \
        (SNDRV_PCM_FMTBIT_S16_LE | \
         SNDRV_PCM_FMTBIT_S24_LE | \
 -       SNDRV_PCM_FMTBIT_S32_LE | \
 -       SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE | \
 -       SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE)
 +       SNDRV_PCM_FMTBIT_S32_LE)
  
  static struct kirkwood_dma_data *kirkwood_priv(struct snd_pcm_substream *subs)
  {
@@@ -57,8 -59,6 +57,6 @@@ static struct snd_pcm_hardware kirkwood
        .fifo_size              = 0,
  };
  
- static u64 kirkwood_dma_dmamask = DMA_BIT_MASK(32);
  static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
  {
        struct kirkwood_dma_data *priv = dev_id;
@@@ -159,7 -159,7 +157,7 @@@ static int kirkwood_dma_open(struct snd
                 * Enable Error interrupts. We're only ack'ing them but
                 * it's useful for diagnostics
                 */
 -              writel((unsigned long)-1, priv->io + KIRKWOOD_ERR_MASK);
 +              writel((unsigned int)-1, priv->io + KIRKWOOD_ERR_MASK);
        }
  
        dram = mv_mbus_dram_info();
@@@ -290,10 -290,9 +288,9 @@@ static int kirkwood_dma_new(struct snd_
        struct snd_pcm *pcm = rtd->pcm;
        int ret;
  
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &kirkwood_dma_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
  
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = kirkwood_dma_preallocate_dma_buffer(pcm,
@@@ -90,8 -90,7 +90,8 @@@ static void s6000_pcm_enqueue_dma(struc
                return;
        }
  
 -      BUG_ON(period_size & 15);
 +      if (WARN_ON(period_size & 15))
 +              return;
        s6dmac_put_fifo(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel),
                        src, dst, period_size);
  
@@@ -445,8 -444,6 +445,6 @@@ static void s6000_pcm_free(struct snd_p
        snd_pcm_lib_preallocate_free_for_all(pcm);
  }
  
- static u64 s6000_pcm_dmamask = DMA_BIT_MASK(32);
  static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
  {
        struct snd_card *card = runtime->card->snd_card;
        params = snd_soc_dai_get_dma_data(runtime->cpu_dai,
                        pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream);
  
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &s6000_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       res = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (res)
+               return res;
  
        if (params->dma_in) {
                s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in),