Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next
author: David S. Miller <davem@davemloft.net>
Wed, 21 Sep 2016 02:52:50 +0000 (22:52 -0400)
committer: David S. Miller <davem@davemloft.net>
Wed, 21 Sep 2016 02:52:50 +0000 (22:52 -0400)
Johan Hedberg says:

====================
pull request: bluetooth-next 2016-09-19

Here's the main bluetooth-next pull request for the 4.9 kernel.

 - Added new messages for monitor sockets for better mgmt tracing
 - Added local name and appearance support in scan response
 - Added new Qualcomm WCNSS SMD based HCI driver
 - Minor fixes & cleanup to 802.15.4 code
 - Added new USB ID to btusb driver
 - Added Marvell support to HCI UART driver
 - Added combined LED trigger for controller power
 - Other minor fixes here and there

Please let me know if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
61 files changed:
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/qca8k.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/b44.h
drivers/net/ethernet/broadcom/bcm63xx_enet.c
drivers/net/ethernet/broadcom/bcm63xx_enet.h
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/hamradio/6pack.c
drivers/net/phy/microchip.c
drivers/net/phy/mscc.c
drivers/net/xen-netfront.c
include/linux/rhashtable.h
include/net/tc_act/tc_ife.h
include/uapi/linux/tc_act/tc_ife.h
lib/rhashtable.c
net/core/skbuff.c
net/dsa/dsa.c
net/dsa/dsa2.c
net/ipv4/af_inet.c
net/ipv4/gre_offload.c
net/ipv4/tcp_offload.c
net/ipv4/udp_offload.c
net/ipv6/ip6_offload.c
net/mac80211/ieee80211_i.h
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/sched/Kconfig
net/sched/Makefile
net/sched/act_api.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ife.c
net/sched/act_meta_skbtcindex.c [new file with mode: 0644]
net/sched/act_mirred.c
net/sched/act_police.c
net/sched/cls_api.c
net/sched/cls_bpf.c
net/sched/cls_flow.c
net/sched/cls_flower.c
net/sched/cls_fw.c
net/sched/cls_route.c
net/sched/cls_tcindex.c
net/sched/cls_u32.c
net/sched/sch_api.c

index 0afc2e5..1a492c0 100644 (file)
@@ -764,11 +764,6 @@ static int b53_get_sset_count(struct dsa_switch *ds)
        return b53_get_mib_size(dev);
 }
 
-static int b53_set_addr(struct dsa_switch *ds, u8 *addr)
-{
-       return 0;
-}
-
 static int b53_setup(struct dsa_switch *ds)
 {
        struct b53_device *dev = ds->priv;
@@ -1466,7 +1461,6 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds)
 static struct dsa_switch_ops b53_switch_ops = {
        .get_tag_protocol       = b53_get_tag_protocol,
        .setup                  = b53_setup,
-       .set_addr               = b53_set_addr,
        .get_strings            = b53_get_strings,
        .get_ethtool_stats      = b53_get_ethtool_stats,
        .get_sset_count         = b53_get_sset_count,
index 7f3f178..4788a89 100644 (file)
@@ -585,13 +585,6 @@ qca8k_setup(struct dsa_switch *ds)
        return 0;
 }
 
-static int
-qca8k_set_addr(struct dsa_switch *ds, u8 *addr)
-{
-       /* The subsystem always calls this function so add an empty stub */
-       return 0;
-}
-
 static int
 qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
 {
@@ -921,7 +914,6 @@ qca8k_get_tag_protocol(struct dsa_switch *ds)
 static struct dsa_switch_ops qca8k_switch_ops = {
        .get_tag_protocol       = qca8k_get_tag_protocol,
        .setup                  = qca8k_setup,
-       .set_addr               = qca8k_set_addr,
        .get_strings            = qca8k_get_strings,
        .phy_read               = qca8k_phy_read,
        .phy_write              = qca8k_phy_write,
index 74f0a37..17aa33c 100644 (file)
@@ -1486,7 +1486,7 @@ static int b44_open(struct net_device *dev)
        b44_enable_ints(bp);
 
        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
-               phy_start(bp->phydev);
+               phy_start(dev->phydev);
 
        netif_start_queue(dev);
 out:
@@ -1651,7 +1651,7 @@ static int b44_close(struct net_device *dev)
        netif_stop_queue(dev);
 
        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
-               phy_stop(bp->phydev);
+               phy_stop(dev->phydev);
 
        napi_disable(&bp->napi);
 
@@ -1832,90 +1832,100 @@ static int b44_nway_reset(struct net_device *dev)
        return r;
 }
 
-static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int b44_get_link_ksettings(struct net_device *dev,
+                                 struct ethtool_link_ksettings *cmd)
 {
        struct b44 *bp = netdev_priv(dev);
+       u32 supported, advertising;
 
        if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
-               BUG_ON(!bp->phydev);
-               return phy_ethtool_gset(bp->phydev, cmd);
+               BUG_ON(!dev->phydev);
+               return phy_ethtool_ksettings_get(dev->phydev, cmd);
        }
 
-       cmd->supported = (SUPPORTED_Autoneg);
-       cmd->supported |= (SUPPORTED_100baseT_Half |
-                         SUPPORTED_100baseT_Full |
-                         SUPPORTED_10baseT_Half |
-                         SUPPORTED_10baseT_Full |
-                         SUPPORTED_MII);
+       supported = (SUPPORTED_Autoneg);
+       supported |= (SUPPORTED_100baseT_Half |
+                     SUPPORTED_100baseT_Full |
+                     SUPPORTED_10baseT_Half |
+                     SUPPORTED_10baseT_Full |
+                     SUPPORTED_MII);
 
-       cmd->advertising = 0;
+       advertising = 0;
        if (bp->flags & B44_FLAG_ADV_10HALF)
-               cmd->advertising |= ADVERTISED_10baseT_Half;
+               advertising |= ADVERTISED_10baseT_Half;
        if (bp->flags & B44_FLAG_ADV_10FULL)
-               cmd->advertising |= ADVERTISED_10baseT_Full;
+               advertising |= ADVERTISED_10baseT_Full;
        if (bp->flags & B44_FLAG_ADV_100HALF)
-               cmd->advertising |= ADVERTISED_100baseT_Half;
+               advertising |= ADVERTISED_100baseT_Half;
        if (bp->flags & B44_FLAG_ADV_100FULL)
-               cmd->advertising |= ADVERTISED_100baseT_Full;
-       cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
-       ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
-                                   SPEED_100 : SPEED_10));
-       cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
+               advertising |= ADVERTISED_100baseT_Full;
+       advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+       cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
+               SPEED_100 : SPEED_10;
+       cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
                DUPLEX_FULL : DUPLEX_HALF;
-       cmd->port = 0;
-       cmd->phy_address = bp->phy_addr;
-       cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ?
-               XCVR_EXTERNAL : XCVR_INTERNAL;
-       cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
+       cmd->base.port = 0;
+       cmd->base.phy_address = bp->phy_addr;
+       cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
                AUTONEG_DISABLE : AUTONEG_ENABLE;
-       if (cmd->autoneg == AUTONEG_ENABLE)
-               cmd->advertising |= ADVERTISED_Autoneg;
+       if (cmd->base.autoneg == AUTONEG_ENABLE)
+               advertising |= ADVERTISED_Autoneg;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
+
        if (!netif_running(dev)){
-               ethtool_cmd_speed_set(cmd, 0);
-               cmd->duplex = 0xff;
+               cmd->base.speed = 0;
+               cmd->base.duplex = 0xff;
        }
-       cmd->maxtxpkt = 0;
-       cmd->maxrxpkt = 0;
+
        return 0;
 }
 
-static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int b44_set_link_ksettings(struct net_device *dev,
+                                 const struct ethtool_link_ksettings *cmd)
 {
        struct b44 *bp = netdev_priv(dev);
        u32 speed;
        int ret;
+       u32 advertising;
 
        if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
-               BUG_ON(!bp->phydev);
+               BUG_ON(!dev->phydev);
                spin_lock_irq(&bp->lock);
                if (netif_running(dev))
                        b44_setup_phy(bp);
 
-               ret = phy_ethtool_sset(bp->phydev, cmd);
+               ret = phy_ethtool_ksettings_set(dev->phydev, cmd);
 
                spin_unlock_irq(&bp->lock);
 
                return ret;
        }
 
-       speed = ethtool_cmd_speed(cmd);
+       speed = cmd->base.speed;
+
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
 
        /* We do not support gigabit. */
-       if (cmd->autoneg == AUTONEG_ENABLE) {
-               if (cmd->advertising &
+       if (cmd->base.autoneg == AUTONEG_ENABLE) {
+               if (advertising &
                    (ADVERTISED_1000baseT_Half |
                     ADVERTISED_1000baseT_Full))
                        return -EINVAL;
        } else if ((speed != SPEED_100 &&
                    speed != SPEED_10) ||
-                  (cmd->duplex != DUPLEX_HALF &&
-                   cmd->duplex != DUPLEX_FULL)) {
+                  (cmd->base.duplex != DUPLEX_HALF &&
+                   cmd->base.duplex != DUPLEX_FULL)) {
                        return -EINVAL;
        }
 
        spin_lock_irq(&bp->lock);
 
-       if (cmd->autoneg == AUTONEG_ENABLE) {
+       if (cmd->base.autoneg == AUTONEG_ENABLE) {
                bp->flags &= ~(B44_FLAG_FORCE_LINK |
                               B44_FLAG_100_BASE_T |
                               B44_FLAG_FULL_DUPLEX |
@@ -1923,19 +1933,19 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                               B44_FLAG_ADV_10FULL |
                               B44_FLAG_ADV_100HALF |
                               B44_FLAG_ADV_100FULL);
-               if (cmd->advertising == 0) {
+               if (advertising == 0) {
                        bp->flags |= (B44_FLAG_ADV_10HALF |
                                      B44_FLAG_ADV_10FULL |
                                      B44_FLAG_ADV_100HALF |
                                      B44_FLAG_ADV_100FULL);
                } else {
-                       if (cmd->advertising & ADVERTISED_10baseT_Half)
+                       if (advertising & ADVERTISED_10baseT_Half)
                                bp->flags |= B44_FLAG_ADV_10HALF;
-                       if (cmd->advertising & ADVERTISED_10baseT_Full)
+                       if (advertising & ADVERTISED_10baseT_Full)
                                bp->flags |= B44_FLAG_ADV_10FULL;
-                       if (cmd->advertising & ADVERTISED_100baseT_Half)
+                       if (advertising & ADVERTISED_100baseT_Half)
                                bp->flags |= B44_FLAG_ADV_100HALF;
-                       if (cmd->advertising & ADVERTISED_100baseT_Full)
+                       if (advertising & ADVERTISED_100baseT_Full)
                                bp->flags |= B44_FLAG_ADV_100FULL;
                }
        } else {
@@ -1943,7 +1953,7 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
                if (speed == SPEED_100)
                        bp->flags |= B44_FLAG_100_BASE_T;
-               if (cmd->duplex == DUPLEX_FULL)
+               if (cmd->base.duplex == DUPLEX_FULL)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
        }
 
@@ -2110,8 +2120,6 @@ static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 
 static const struct ethtool_ops b44_ethtool_ops = {
        .get_drvinfo            = b44_get_drvinfo,
-       .get_settings           = b44_get_settings,
-       .set_settings           = b44_set_settings,
        .nway_reset             = b44_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_wol                = b44_get_wol,
@@ -2125,6 +2133,8 @@ static const struct ethtool_ops b44_ethtool_ops = {
        .get_strings            = b44_get_strings,
        .get_sset_count         = b44_get_sset_count,
        .get_ethtool_stats      = b44_get_ethtool_stats,
+       .get_link_ksettings     = b44_get_link_ksettings,
+       .set_link_ksettings     = b44_set_link_ksettings,
 };
 
 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -2137,8 +2147,8 @@ static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
        spin_lock_irq(&bp->lock);
        if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
-               BUG_ON(!bp->phydev);
-               err = phy_mii_ioctl(bp->phydev, ifr, cmd);
+               BUG_ON(!dev->phydev);
+               err = phy_mii_ioctl(dev->phydev, ifr, cmd);
        } else {
                err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
        }
@@ -2206,7 +2216,7 @@ static const struct net_device_ops b44_netdev_ops = {
 static void b44_adjust_link(struct net_device *dev)
 {
        struct b44 *bp = netdev_priv(dev);
-       struct phy_device *phydev = bp->phydev;
+       struct phy_device *phydev = dev->phydev;
        bool status_changed = 0;
 
        BUG_ON(!phydev);
@@ -2303,7 +2313,6 @@ static int b44_register_phy_one(struct b44 *bp)
                              SUPPORTED_MII);
        phydev->advertising = phydev->supported;
 
-       bp->phydev = phydev;
        bp->old_link = 0;
        bp->phy_addr = phydev->mdio.addr;
 
@@ -2323,9 +2332,10 @@ err_out:
 
 static void b44_unregister_phy_one(struct b44 *bp)
 {
+       struct net_device *dev = bp->dev;
        struct mii_bus *mii_bus = bp->mii_bus;
 
-       phy_disconnect(bp->phydev);
+       phy_disconnect(dev->phydev);
        mdiobus_unregister(mii_bus);
        mdiobus_free(mii_bus);
 }
index 65d88d7..89d2cf3 100644 (file)
@@ -404,7 +404,6 @@ struct b44 {
        u32                     tx_pending;
        u8                      phy_addr;
        u8                      force_copybreak;
-       struct phy_device       *phydev;
        struct mii_bus          *mii_bus;
        int                     old_link;
        struct mii_if_info      mii_if;
index 6c8bc5f..ae364c7 100644 (file)
@@ -791,7 +791,7 @@ static void bcm_enet_adjust_phy_link(struct net_device *dev)
        int status_changed;
 
        priv = netdev_priv(dev);
-       phydev = priv->phydev;
+       phydev = dev->phydev;
        status_changed = 0;
 
        if (priv->old_link != phydev->link) {
@@ -913,7 +913,6 @@ static int bcm_enet_open(struct net_device *dev)
                priv->old_link = 0;
                priv->old_duplex = -1;
                priv->old_pause = -1;
-               priv->phydev = phydev;
        }
 
        /* mask all interrupts and request them */
@@ -1085,7 +1084,7 @@ static int bcm_enet_open(struct net_device *dev)
                         ENETDMAC_IRMASK, priv->tx_chan);
 
        if (priv->has_phy)
-               phy_start(priv->phydev);
+               phy_start(phydev);
        else
                bcm_enet_adjust_link(dev);
 
@@ -1127,7 +1126,7 @@ out_freeirq:
        free_irq(dev->irq, dev);
 
 out_phy_disconnect:
-       phy_disconnect(priv->phydev);
+       phy_disconnect(phydev);
 
        return ret;
 }
@@ -1190,7 +1189,7 @@ static int bcm_enet_stop(struct net_device *dev)
        netif_stop_queue(dev);
        napi_disable(&priv->napi);
        if (priv->has_phy)
-               phy_stop(priv->phydev);
+               phy_stop(dev->phydev);
        del_timer_sync(&priv->rx_timeout);
 
        /* mask all interrupts */
@@ -1234,10 +1233,8 @@ static int bcm_enet_stop(struct net_device *dev)
        free_irq(dev->irq, dev);
 
        /* release phy */
-       if (priv->has_phy) {
-               phy_disconnect(priv->phydev);
-               priv->phydev = NULL;
-       }
+       if (priv->has_phy)
+               phy_disconnect(dev->phydev);
 
        return 0;
 }
@@ -1437,64 +1434,68 @@ static int bcm_enet_nway_reset(struct net_device *dev)
 
        priv = netdev_priv(dev);
        if (priv->has_phy) {
-               if (!priv->phydev)
+               if (!dev->phydev)
                        return -ENODEV;
-               return genphy_restart_aneg(priv->phydev);
+               return genphy_restart_aneg(dev->phydev);
        }
 
        return -EOPNOTSUPP;
 }
 
-static int bcm_enet_get_settings(struct net_device *dev,
-                                struct ethtool_cmd *cmd)
+static int bcm_enet_get_link_ksettings(struct net_device *dev,
+                                      struct ethtool_link_ksettings *cmd)
 {
        struct bcm_enet_priv *priv;
+       u32 supported, advertising;
 
        priv = netdev_priv(dev);
 
-       cmd->maxrxpkt = 0;
-       cmd->maxtxpkt = 0;
-
        if (priv->has_phy) {
-               if (!priv->phydev)
+               if (!dev->phydev)
                        return -ENODEV;
-               return phy_ethtool_gset(priv->phydev, cmd);
+               return phy_ethtool_ksettings_get(dev->phydev, cmd);
        } else {
-               cmd->autoneg = 0;
-               ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
-                                           ? SPEED_100 : SPEED_10));
-               cmd->duplex = (priv->force_duplex_full) ?
+               cmd->base.autoneg = 0;
+               cmd->base.speed = (priv->force_speed_100) ?
+                       SPEED_100 : SPEED_10;
+               cmd->base.duplex = (priv->force_duplex_full) ?
                        DUPLEX_FULL : DUPLEX_HALF;
-               cmd->supported = ADVERTISED_10baseT_Half  |
+               supported = ADVERTISED_10baseT_Half |
                        ADVERTISED_10baseT_Full |
                        ADVERTISED_100baseT_Half |
                        ADVERTISED_100baseT_Full;
-               cmd->advertising = 0;
-               cmd->port = PORT_MII;
-               cmd->transceiver = XCVR_EXTERNAL;
+               advertising = 0;
+               ethtool_convert_legacy_u32_to_link_mode(
+                       cmd->link_modes.supported, supported);
+               ethtool_convert_legacy_u32_to_link_mode(
+                       cmd->link_modes.advertising, advertising);
+               cmd->base.port = PORT_MII;
        }
        return 0;
 }
 
-static int bcm_enet_set_settings(struct net_device *dev,
-                                struct ethtool_cmd *cmd)
+static int bcm_enet_set_link_ksettings(struct net_device *dev,
+                                      const struct ethtool_link_ksettings *cmd)
 {
        struct bcm_enet_priv *priv;
 
        priv = netdev_priv(dev);
        if (priv->has_phy) {
-               if (!priv->phydev)
+               if (!dev->phydev)
                        return -ENODEV;
-               return phy_ethtool_sset(priv->phydev, cmd);
+               return phy_ethtool_ksettings_set(dev->phydev, cmd);
        } else {
 
-               if (cmd->autoneg ||
-                   (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
-                   cmd->port != PORT_MII)
+               if (cmd->base.autoneg ||
+                   (cmd->base.speed != SPEED_100 &&
+                    cmd->base.speed != SPEED_10) ||
+                   cmd->base.port != PORT_MII)
                        return -EINVAL;
 
-               priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
-               priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;
+               priv->force_speed_100 =
+                       (cmd->base.speed == SPEED_100) ? 1 : 0;
+               priv->force_duplex_full =
+                       (cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;
 
                if (netif_running(dev))
                        bcm_enet_adjust_link(dev);
@@ -1588,14 +1589,14 @@ static const struct ethtool_ops bcm_enet_ethtool_ops = {
        .get_sset_count         = bcm_enet_get_sset_count,
        .get_ethtool_stats      = bcm_enet_get_ethtool_stats,
        .nway_reset             = bcm_enet_nway_reset,
-       .get_settings           = bcm_enet_get_settings,
-       .set_settings           = bcm_enet_set_settings,
        .get_drvinfo            = bcm_enet_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_ringparam          = bcm_enet_get_ringparam,
        .set_ringparam          = bcm_enet_set_ringparam,
        .get_pauseparam         = bcm_enet_get_pauseparam,
        .set_pauseparam         = bcm_enet_set_pauseparam,
+       .get_link_ksettings     = bcm_enet_get_link_ksettings,
+       .set_link_ksettings     = bcm_enet_set_link_ksettings,
 };
 
 static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1604,9 +1605,9 @@ static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 
        priv = netdev_priv(dev);
        if (priv->has_phy) {
-               if (!priv->phydev)
+               if (!dev->phydev)
                        return -ENODEV;
-               return phy_mii_ioctl(priv->phydev, rq, cmd);
+               return phy_mii_ioctl(dev->phydev, rq, cmd);
        } else {
                struct mii_if_info mii;
 
index f55af43..0a1b7b2 100644 (file)
@@ -290,7 +290,6 @@ struct bcm_enet_priv {
 
        /* used when a phy is connected (phylib used) */
        struct mii_bus *mii_bus;
-       struct phy_device *phydev;
        int old_link;
        int old_duplex;
        int old_pause;
index 228c964..a9f9f37 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/mii.h>
 #include <linux/if.h>
 #include <linux/if_vlan.h>
+#include <linux/rtc.h>
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <net/udp.h>
@@ -93,50 +94,49 @@ enum board_idx {
        BCM57404_NPAR,
        BCM57406_NPAR,
        BCM57407_SFP,
+       BCM57407_NPAR,
        BCM57414_NPAR,
        BCM57416_NPAR,
-       BCM57304_VF,
-       BCM57404_VF,
-       BCM57414_VF,
-       BCM57314_VF,
+       NETXTREME_E_VF,
+       NETXTREME_C_VF,
 };
 
 /* indexed by enum above */
 static const struct {
        char *name;
 } board_info[] = {
-       { "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
-       { "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
-       { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+       { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
+       { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
+       { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
-       { "Broadcom BCM58700 Nitro 4-port 1Gb/2.5Gb/10Gb Ethernet" },
-       { "Broadcom BCM57311 NetXtreme-C Single-port 10Gb Ethernet" },
-       { "Broadcom BCM57312 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
-       { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
-       { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
-       { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+       { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
+       { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
+       { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
+       { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
+       { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
+       { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
        { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
-       { "Broadcom BCM57407 NetXtreme-E Dual-port 10GBase-T Ethernet" },
-       { "Broadcom BCM57412 NetXtreme-E Dual-port 10Gb Ethernet" },
-       { "Broadcom BCM57414 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
-       { "Broadcom BCM57416 NetXtreme-E Dual-port 10GBase-T Ethernet" },
-       { "Broadcom BCM57417 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+       { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
+       { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
+       { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
+       { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
+       { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
        { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
-       { "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
-       { "Broadcom BCM57417 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
-       { "Broadcom BCM57416 NetXtreme-E Dual-port 10Gb Ethernet" },
+       { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
+       { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
+       { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
        { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
-       { "Broadcom BCM57407 NetXtreme-E Dual-port 25Gb Ethernet" },
+       { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
+       { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
-       { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
-       { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
-       { "Broadcom BCM57414 NetXtreme-E Ethernet Virtual Function" },
-       { "Broadcom BCM57314 NetXtreme-E Ethernet Virtual Function" },
+       { "Broadcom NetXtreme-E Ethernet Virtual Function" },
+       { "Broadcom NetXtreme-C Ethernet Virtual Function" },
 };
 
 static const struct pci_device_id bnxt_pci_tbl[] = {
+       { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
        { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
        { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
@@ -160,13 +160,19 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
+       { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
 #ifdef CONFIG_BNXT_SRIOV
-       { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
-       { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
-       { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = BCM57414_VF },
-       { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = BCM57314_VF },
+       { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
+       { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
+       { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
+       { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
+       { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
+       { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
 #endif
        { 0 }
 };
@@ -189,8 +195,7 @@ static const u16 bnxt_async_events_arr[] = {
 
 static bool bnxt_vf_pciid(enum board_idx idx)
 {
-       return (idx == BCM57304_VF || idx == BCM57404_VF ||
-               idx == BCM57314_VF || idx == BCM57414_VF);
+       return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
 }
 
 #define DB_CP_REARM_FLAGS      (DB_KEY_CP | DB_IDX_VALID)
@@ -3419,10 +3424,10 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
        if (set_rss) {
-               vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
-                                BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
-                                BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
-                                BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
+               vnic->hash_type = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+                                 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+                                 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+                                 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
 
                req.hash_type = cpu_to_le32(vnic->hash_type);
 
@@ -4156,6 +4161,11 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
        if (rc)
                goto hwrm_func_qcaps_exit;
 
+       bp->tx_push_thresh = 0;
+       if (resp->flags &
+           cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+               bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+
        if (BNXT_PF(bp)) {
                struct bnxt_pf_info *pf = &bp->pf;
 
@@ -4187,12 +4197,6 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
                struct bnxt_vf_info *vf = &bp->vf;
 
                vf->fw_fid = le16_to_cpu(resp->fid);
-               memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
-               if (is_valid_ether_addr(vf->mac_addr))
-                       /* overwrite netdev dev_adr with admin VF MAC */
-                       memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
-               else
-                       random_ether_addr(bp->dev->dev_addr);
 
                vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
                vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
@@ -4204,14 +4208,21 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
                vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
                vf->max_vnics = le16_to_cpu(resp->max_vnics);
                vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+               memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
+               mutex_unlock(&bp->hwrm_cmd_lock);
+
+               if (is_valid_ether_addr(vf->mac_addr)) {
+                       /* overwrite netdev dev_adr with admin VF MAC */
+                       memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+               } else {
+                       random_ether_addr(bp->dev->dev_addr);
+                       rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+               }
+               return rc;
 #endif
        }
 
-       bp->tx_push_thresh = 0;
-       if (resp->flags &
-           cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
-               bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
-
 hwrm_func_qcaps_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
@@ -4249,6 +4260,9 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
        if (bp->max_tc > BNXT_MAX_QUEUE)
                bp->max_tc = BNXT_MAX_QUEUE;
 
+       if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
+               bp->max_tc = 1;
+
        qptr = &resp->queue_id0;
        for (i = 0; i < bp->max_tc; i++) {
                bp->q_info[i].queue_id = *qptr++;
@@ -4307,6 +4321,31 @@ hwrm_ver_get_exit:
        return rc;
 }
 
+int bnxt_hwrm_fw_set_time(struct bnxt *bp)
+{
+#if IS_ENABLED(CONFIG_RTC_LIB)
+       struct hwrm_fw_set_time_input req = {0};
+       struct rtc_time tm;
+       struct timeval tv;
+
+       if (bp->hwrm_spec_code < 0x10400)
+               return -EOPNOTSUPP;
+
+       do_gettimeofday(&tv);
+       rtc_time_to_tm(tv.tv_sec, &tm);
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
+       req.year = cpu_to_le16(1900 + tm.tm_year);
+       req.month = 1 + tm.tm_mon;
+       req.day = tm.tm_mday;
+       req.hour = tm.tm_hour;
+       req.minute = tm.tm_min;
+       req.second = tm.tm_sec;
+       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+#else
+       return -EOPNOTSUPP;
+#endif
+}
+
 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
 {
        int rc;
@@ -6804,6 +6843,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (rc)
                goto init_err;
 
+       bnxt_hwrm_fw_set_time(bp);
+
        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
                           NETIF_F_TSO | NETIF_F_TSO6 |
                           NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
index 23e04a6..51b164a 100644 (file)
 #define BNXT_H
 
 #define DRV_MODULE_NAME                "bnxt_en"
-#define DRV_MODULE_VERSION     "1.3.0"
+#define DRV_MODULE_VERSION     "1.5.0"
 
 #define DRV_VER_MAJ    1
-#define DRV_VER_MIN    3
+#define DRV_VER_MIN    5
 #define DRV_VER_UPD    0
 
 struct tx_bd {
@@ -106,11 +106,11 @@ struct tx_cmp {
         #define CMP_TYPE_REMOTE_DRIVER_REQ                      34
         #define CMP_TYPE_REMOTE_DRIVER_RESP                     36
         #define CMP_TYPE_ERROR_STATUS                           48
-        #define CMPL_BASE_TYPE_STAT_EJECT                       (0x1aUL << 0)
-        #define CMPL_BASE_TYPE_HWRM_DONE                        (0x20UL << 0)
-        #define CMPL_BASE_TYPE_HWRM_FWD_REQ                     (0x22UL << 0)
-        #define CMPL_BASE_TYPE_HWRM_FWD_RESP                    (0x24UL << 0)
-        #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT                 (0x2eUL << 0)
+        #define CMPL_BASE_TYPE_STAT_EJECT                       0x1aUL
+        #define CMPL_BASE_TYPE_HWRM_DONE                        0x20UL
+        #define CMPL_BASE_TYPE_HWRM_FWD_REQ                     0x22UL
+        #define CMPL_BASE_TYPE_HWRM_FWD_RESP                    0x24UL
+        #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT                 0x2eUL
 
        #define TX_CMP_FLAGS_ERROR                              (1 << 6)
        #define TX_CMP_FLAGS_PUSH                               (1 << 7)
@@ -389,11 +389,6 @@ struct rx_tpa_end_cmp_ext {
 
 #define INVALID_HW_RING_ID     ((u16)-1)
 
-#define BNXT_RSS_HASH_TYPE_FLAG_IPV4           0x01
-#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4       0x02
-#define BNXT_RSS_HASH_TYPE_FLAG_IPV6           0x04
-#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6       0x08
-
 /* The hardware supports certain page sizes.  Use the supported page sizes
  * to allocate the rings.
  */
@@ -418,7 +413,7 @@ struct rx_tpa_end_cmp_ext {
 
 #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
 
-#define BNXT_MIN_PKT_SIZE      45
+#define BNXT_MIN_PKT_SIZE      52
 
 #define BNXT_NUM_TESTS(bp)     0
 
@@ -1225,6 +1220,7 @@ int bnxt_hwrm_set_coal(struct bnxt *);
 int bnxt_hwrm_func_qcaps(struct bnxt *);
 int bnxt_hwrm_set_pause(struct bnxt *);
 int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+int bnxt_hwrm_fw_set_time(struct bnxt *);
 int bnxt_open_nic(struct bnxt *, bool, bool);
 int bnxt_close_nic(struct bnxt *, bool, bool);
 int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
index b83e174..a7e04ff 100644 (file)
@@ -21,6 +21,8 @@
 #include "bnxt_nvm_defs.h"     /* NVRAM content constant and structure defs */
 #include "bnxt_fw_hdr.h"       /* Firmware hdr constant and structure defs */
 #define FLASH_NVRAM_TIMEOUT    ((HWRM_CMD_TIMEOUT) * 100)
+#define FLASH_PACKAGE_TIMEOUT  ((HWRM_CMD_TIMEOUT) * 200)
+#define INSTALL_PACKAGE_TIMEOUT        ((HWRM_CMD_TIMEOUT) * 200)
 
 static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen);
 
@@ -346,7 +348,7 @@ static void bnxt_get_channels(struct net_device *dev,
        int max_rx_rings, max_tx_rings, tcs;
 
        bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
-       channel->max_combined = max_rx_rings;
+       channel->max_combined = max_t(int, max_rx_rings, max_tx_rings);
 
        if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
                max_rx_rings = 0;
@@ -404,8 +406,8 @@ static int bnxt_set_channels(struct net_device *dev,
        if (tcs > 1)
                max_tx_rings /= tcs;
 
-       if (sh && (channel->combined_count > max_rx_rings ||
-                  channel->combined_count > max_tx_rings))
+       if (sh &&
+           channel->combined_count > max_t(int, max_rx_rings, max_tx_rings))
                return -ENOMEM;
 
        if (!sh && (channel->rx_count > max_rx_rings ||
@@ -428,8 +430,10 @@ static int bnxt_set_channels(struct net_device *dev,
 
        if (sh) {
                bp->flags |= BNXT_FLAG_SHARED_RINGS;
-               bp->rx_nr_rings = channel->combined_count;
-               bp->tx_nr_rings_per_tc = channel->combined_count;
+               bp->rx_nr_rings = min_t(int, channel->combined_count,
+                                       max_rx_rings);
+               bp->tx_nr_rings_per_tc = min_t(int, channel->combined_count,
+                                              max_tx_rings);
        } else {
                bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
                bp->rx_nr_rings = channel->rx_count;
@@ -1028,6 +1032,10 @@ static u32 bnxt_get_link(struct net_device *dev)
        return bp->link_info.link_up;
 }
 
+static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+                               u16 ext, u16 *index, u32 *item_length,
+                               u32 *data_length);
+
 static int bnxt_flash_nvram(struct net_device *dev,
                            u16 dir_type,
                            u16 dir_ordinal,
@@ -1179,7 +1187,6 @@ static int bnxt_flash_firmware(struct net_device *dev,
                           (unsigned long)calculated_crc);
                return -EINVAL;
        }
-       /* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */
        rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
                              0, 0, fw_data, fw_size);
        if (rc == 0)    /* Firmware update successful */
@@ -1188,6 +1195,57 @@ static int bnxt_flash_firmware(struct net_device *dev,
        return rc;
 }
 
+static int bnxt_flash_microcode(struct net_device *dev,
+                               u16 dir_type,
+                               const u8 *fw_data,
+                               size_t fw_size)
+{
+       struct bnxt_ucode_trailer *trailer;
+       u32 calculated_crc;
+       u32 stored_crc;
+       int rc = 0;
+
+       if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
+               netdev_err(dev, "Invalid microcode file size: %u\n",
+                          (unsigned int)fw_size);
+               return -EINVAL;
+       }
+       trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
+                                               sizeof(*trailer)));
+       if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
+               netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
+                          le32_to_cpu(trailer->sig));
+               return -EINVAL;
+       }
+       if (le16_to_cpu(trailer->dir_type) != dir_type) {
+               netdev_err(dev, "Expected microcode type: %d, read: %d\n",
+                          dir_type, le16_to_cpu(trailer->dir_type));
+               return -EINVAL;
+       }
+       if (le16_to_cpu(trailer->trailer_length) <
+               sizeof(struct bnxt_ucode_trailer)) {
+               netdev_err(dev, "Invalid microcode trailer length: %d\n",
+                          le16_to_cpu(trailer->trailer_length));
+               return -EINVAL;
+       }
+
+       /* Confirm the CRC32 checksum of the file: */
+       stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
+                                            sizeof(stored_crc)));
+       calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
+       if (calculated_crc != stored_crc) {
+               netdev_err(dev,
+                          "CRC32 (%08lX) does not match calculated: %08lX\n",
+                          (unsigned long)stored_crc,
+                          (unsigned long)calculated_crc);
+               return -EINVAL;
+       }
+       rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+                             0, 0, fw_data, fw_size);
+
+       return rc;
+}
+
 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
 {
        switch (dir_type) {
@@ -1206,7 +1264,7 @@ static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
        return false;
 }
 
-static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
+static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
 {
        switch (dir_type) {
        case BNX_DIR_TYPE_AVS:
@@ -1227,7 +1285,7 @@ static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
 static bool bnxt_dir_type_is_executable(u16 dir_type)
 {
        return bnxt_dir_type_is_ape_bin_format(dir_type) ||
-               bnxt_dir_type_is_unprotected_exec_format(dir_type);
+               bnxt_dir_type_is_other_exec_format(dir_type);
 }
 
 static int bnxt_flash_firmware_from_file(struct net_device *dev,
@@ -1237,10 +1295,6 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
        const struct firmware  *fw;
        int                     rc;
 
-       if (dir_type != BNX_DIR_TYPE_UPDATE &&
-           bnxt_dir_type_is_executable(dir_type) == false)
-               return -EINVAL;
-
        rc = request_firmware(&fw, filename, &dev->dev);
        if (rc != 0) {
                netdev_err(dev, "Error %d requesting firmware file: %s\n",
@@ -1249,6 +1303,8 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
        }
        if (bnxt_dir_type_is_ape_bin_format(dir_type) == true)
                rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
+       else if (bnxt_dir_type_is_other_exec_format(dir_type) == true)
+               rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
        else
                rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
                                      0, 0, fw->data, fw->size);
@@ -1257,10 +1313,83 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
 }
 
 static int bnxt_flash_package_from_file(struct net_device *dev,
-                                       char *filename)
+                                       char *filename, u32 install_type)
 {
-       netdev_err(dev, "packages are not yet supported\n");
-       return -EINVAL;
+       struct bnxt *bp = netdev_priv(dev);
+       struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_nvm_install_update_input install = {0};
+       const struct firmware *fw;
+       u32 item_len;
+       u16 index;
+       int rc;
+
+       bnxt_hwrm_fw_set_time(bp);
+
+       if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
+                                BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+                                &index, &item_len, NULL) != 0) {
+               netdev_err(dev, "PKG update area not created in nvram\n");
+               return -ENOBUFS;
+       }
+
+       rc = request_firmware(&fw, filename, &dev->dev);
+       if (rc != 0) {
+               netdev_err(dev, "PKG error %d requesting file: %s\n",
+                          rc, filename);
+               return rc;
+       }
+
+       if (fw->size > item_len) {
+               netdev_err(dev, "PKG insufficient update area in nvram: %lu",
+                          (unsigned long)fw->size);
+               rc = -EFBIG;
+       } else {
+               dma_addr_t dma_handle;
+               u8 *kmem;
+               struct hwrm_nvm_modify_input modify = {0};
+
+               bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);
+
+               modify.dir_idx = cpu_to_le16(index);
+               modify.len = cpu_to_le32(fw->size);
+
+               kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
+                                         &dma_handle, GFP_KERNEL);
+               if (!kmem) {
+                       netdev_err(dev,
+                                  "dma_alloc_coherent failure, length = %u\n",
+                                  (unsigned int)fw->size);
+                       rc = -ENOMEM;
+               } else {
+                       memcpy(kmem, fw->data, fw->size);
+                       modify.host_src_addr = cpu_to_le64(dma_handle);
+
+                       rc = hwrm_send_message(bp, &modify, sizeof(modify),
+                                              FLASH_PACKAGE_TIMEOUT);
+                       dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
+                                         dma_handle);
+               }
+       }
+       release_firmware(fw);
+       if (rc)
+               return rc;
+
+       if ((install_type & 0xffff) == 0)
+               install_type >>= 16;
+       bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
+       install.install_type = cpu_to_le32(install_type);
+
+       rc = hwrm_send_message(bp, &install, sizeof(install),
+                              INSTALL_PACKAGE_TIMEOUT);
+       if (rc)
+               return -EOPNOTSUPP;
+
+       if (resp->result) {
+               netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
+                          (s8)resp->result, (int)resp->problem_item);
+               return -ENOPKG;
+       }
+       return 0;
 }
 
 static int bnxt_flash_device(struct net_device *dev,
@@ -1271,8 +1400,10 @@ static int bnxt_flash_device(struct net_device *dev,
                return -EINVAL;
        }
 
-       if (flash->region == ETHTOOL_FLASH_ALL_REGIONS)
-               return bnxt_flash_package_from_file(dev, flash->data);
+       if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
+           flash->region > 0xffff)
+               return bnxt_flash_package_from_file(dev, flash->data,
+                                                   flash->region);
 
        return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
 }
@@ -1516,7 +1647,7 @@ static int bnxt_set_eeprom(struct net_device *dev,
 
        /* Create or re-write an NVM item: */
        if (bnxt_dir_type_is_executable(type) == true)
-               return -EINVAL;
+               return -EOPNOTSUPP;
        ext = eeprom->magic & 0xffff;
        ordinal = eeprom->offset >> 16;
        attr = eeprom->offset & 0xffff;
@@ -1718,6 +1849,25 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
        return rc;
 }
 
+static int bnxt_nway_reset(struct net_device *dev)
+{
+       int rc = 0;
+
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_link_info *link_info = &bp->link_info;
+
+       if (!BNXT_SINGLE_PF(bp))
+               return -EOPNOTSUPP;
+
+       if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
+               return -EINVAL;
+
+       if (netif_running(dev))
+               rc = bnxt_hwrm_set_link_setting(bp, true, false);
+
+       return rc;
+}
+
 const struct ethtool_ops bnxt_ethtool_ops = {
        .get_link_ksettings     = bnxt_get_link_ksettings,
        .set_link_ksettings     = bnxt_set_link_ksettings,
@@ -1750,4 +1900,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
        .set_eee                = bnxt_set_eee,
        .get_module_info        = bnxt_get_module_info,
        .get_module_eeprom      = bnxt_get_module_eeprom,
+       .nway_reset             = bnxt_nway_reset
 };
index 82bf44a..cad30dd 100644 (file)
@@ -11,6 +11,7 @@
 #define __BNXT_FW_HDR_H__
 
 #define BNXT_FIRMWARE_BIN_SIGNATURE     0x1a4d4342     /* "BCM"+0x1a */
+#define BNXT_UCODE_TRAILER_SIGNATURE   0x726c7254      /* "Trlr" */
 
 enum SUPPORTED_FAMILY {
        DEVICE_5702_3_4_FAMILY,         /* 0  - Denali, Vinson, K2 */
@@ -85,7 +86,7 @@ enum SUPPORTED_MEDIA {
 
 struct bnxt_fw_header {
        __le32 signature;       /* constains the constant value of
-                                * BNXT_Firmware_Bin_Signatures
+                                * BNXT_FIRMWARE_BIN_SIGNATURE
                                 */
        u8 flags;               /* reserved for ChiMP use */
        u8 code_type;           /* enum SUPPORTED_CODE */
@@ -102,4 +103,17 @@ struct bnxt_fw_header {
        u8 major_ver;
 };
 
+/* Microcode and pre-boot software/firmware trailer: */
+struct bnxt_ucode_trailer {
+       u8 rsa_sig[256];
+       __le16 flags;
+       u8 version_format;
+       u8 version_length;
+       u8 version[16];
+       __le16 dir_type;
+       __le16 trailer_length;
+       __le32 sig;             /* BNXT_UCODE_TRAILER_SIGNATURE */
+       __le32 chksum;          /* CRC-32 */
+};
+
 #endif
index 517567f..04a96cc 100644 (file)
@@ -39,7 +39,7 @@ struct eject_cmpl {
        __le16 type;
        #define EJECT_CMPL_TYPE_MASK                                0x3fUL
        #define EJECT_CMPL_TYPE_SFT                                 0
-       #define EJECT_CMPL_TYPE_STAT_EJECT                         (0x1aUL << 0)
+       #define EJECT_CMPL_TYPE_STAT_EJECT                         0x1aUL
        __le16 len;
        __le32 opaque;
        __le32 v;
@@ -52,7 +52,7 @@ struct hwrm_cmpl {
        __le16 type;
        #define HWRM_CMPL_TYPE_MASK                                 0x3fUL
        #define HWRM_CMPL_TYPE_SFT                                  0
-       #define HWRM_CMPL_TYPE_HWRM_DONE                           (0x20UL << 0)
+       #define HWRM_CMPL_TYPE_HWRM_DONE                           0x20UL
        __le16 sequence_id;
        __le32 unused_1;
        __le32 v;
@@ -65,7 +65,7 @@ struct hwrm_fwd_req_cmpl {
        __le16 req_len_type;
        #define HWRM_FWD_REQ_CMPL_TYPE_MASK                         0x3fUL
        #define HWRM_FWD_REQ_CMPL_TYPE_SFT                          0
-       #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ                (0x22UL << 0)
+       #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ                0x22UL
        #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK                      0xffc0UL
        #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT                       6
        __le16 source_id;
@@ -81,7 +81,7 @@ struct hwrm_fwd_resp_cmpl {
        __le16 type;
        #define HWRM_FWD_RESP_CMPL_TYPE_MASK                        0x3fUL
        #define HWRM_FWD_RESP_CMPL_TYPE_SFT                         0
-       #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP              (0x24UL << 0)
+       #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP              0x24UL
        __le16 source_id;
        __le16 resp_len;
        __le16 unused_1;
@@ -96,25 +96,26 @@ struct hwrm_async_event_cmpl {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK             0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT                      0
-       #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT       (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT       0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE    (0x1UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE  (0x2UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE  (0x3UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE (0x7UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD   (0x10UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD     (0x11UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD     (0x20UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD       (0x21UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR              (0x30UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE      (0x33UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR          (0xffUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE    0x1UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE  0x2UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE  0x3UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD   0x10UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD     0x11UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD     0x20UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD       0x21UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR              0x30UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE      0x33UL
+       #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR          0xffUL
        __le32 event_data2;
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_V                     0x1UL
@@ -130,9 +131,9 @@ struct hwrm_async_event_cmpl_link_status_change {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT  0
-       #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
        __le32 event_data2;
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V          0x1UL
@@ -156,9 +157,9 @@ struct hwrm_async_event_cmpl_link_mtu_change {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK    0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT     0
-       #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
        __le32 event_data2;
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V     0x1UL
@@ -176,9 +177,9 @@ struct hwrm_async_event_cmpl_link_speed_change {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK  0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT   0
-       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
        __le32 event_data2;
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V           0x1UL
@@ -200,8 +201,7 @@ struct hwrm_async_event_cmpl_link_speed_change {
        #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
        #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
        #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
-       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB (0xffffUL << 1)
-       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST    HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST    HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
        #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
        #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
 };
@@ -211,9 +211,9 @@ struct hwrm_async_event_cmpl_dcb_config_change {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK  0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT   0
-       #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
        __le32 event_data2;
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V           0x1UL
@@ -231,9 +231,9 @@ struct hwrm_async_event_cmpl_port_conn_not_allowed {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
-       #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
        __le32 event_data2;
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V      0x1UL
@@ -258,9 +258,9 @@ struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
-       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
        __le32 event_data2;
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
@@ -278,9 +278,9 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
-       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
        __le32 event_data2;
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V      0x1UL
@@ -300,9 +300,9 @@ struct hwrm_async_event_cmpl_func_drvr_unload {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK   0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT    0
-       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
        __le32 event_data2;
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V            0x1UL
@@ -320,9 +320,9 @@ struct hwrm_async_event_cmpl_func_drvr_load {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK     0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT      0
-       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
        __le32 event_data2;
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V              0x1UL
@@ -340,9 +340,9 @@ struct hwrm_async_event_cmpl_pf_drvr_unload {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK     0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT      0
-       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
        __le32 event_data2;
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V              0x1UL
@@ -362,9 +362,9 @@ struct hwrm_async_event_cmpl_pf_drvr_load {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK       0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT         0
-       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
        __le32 event_data2;
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V                0x1UL
@@ -384,9 +384,9 @@ struct hwrm_async_event_cmpl_vf_flr {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK              0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT               0
-       #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR      (0x30UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR      0x30UL
        __le32 event_data2;
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V                      0x1UL
@@ -404,9 +404,9 @@ struct hwrm_async_event_cmpl_vf_mac_addr_change {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT  0
-       #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
        __le32 event_data2;
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V          0x1UL
@@ -424,9 +424,9 @@ struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
-       #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
        __le32 event_data2;
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V   0x1UL
@@ -443,9 +443,9 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK      0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT       0
-       #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
        __le32 event_data2;
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V               0x1UL
@@ -465,15 +465,15 @@ struct hwrm_async_event_cmpl_hwrm_error {
        __le16 type;
        #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK          0x3fUL
        #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT           0
-       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
        __le16 event_id;
-       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
        __le32 event_data2;
        #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
        #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
-       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
-       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+       #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
        #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST    HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
        u8 opaque_v;
        #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V                  0x1UL
@@ -485,12 +485,12 @@ struct hwrm_async_event_cmpl_hwrm_error {
        #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
 };
 
-/* HW Resource Manager Specification 1.3.0 */
+/* HW Resource Manager Specification 1.5.1 */
 #define HWRM_VERSION_MAJOR     1
-#define HWRM_VERSION_MINOR     3
-#define HWRM_VERSION_UPDATE    0
+#define HWRM_VERSION_MINOR     5
+#define HWRM_VERSION_UPDATE    1
 
-#define HWRM_VERSION_STR       "1.3.0"
+#define HWRM_VERSION_STR       "1.5.1"
 /*
  * Following is the signature for HWRM message field that indicates not
  * applicable (All F's). Need to cast it the size of the field if needed.
@@ -556,8 +556,8 @@ struct cmd_nums {
        #define HWRM_QUEUE_QPORTCFG                                (0x30UL)
        #define HWRM_QUEUE_QCFG                            (0x31UL)
        #define HWRM_QUEUE_CFG                                     (0x32UL)
-       #define HWRM_QUEUE_BUFFERS_QCFG                    (0x33UL)
-       #define HWRM_QUEUE_BUFFERS_CFG                             (0x34UL)
+       #define RESERVED2                                          (0x33UL)
+       #define RESERVED3                                          (0x34UL)
        #define HWRM_QUEUE_PFCENABLE_QCFG                          (0x35UL)
        #define HWRM_QUEUE_PFCENABLE_CFG                           (0x36UL)
        #define HWRM_QUEUE_PRI2COS_QCFG                    (0x37UL)
@@ -574,6 +574,7 @@ struct cmd_nums {
        #define HWRM_VNIC_RSS_QCFG                                 (0x47UL)
        #define HWRM_VNIC_PLCMODES_CFG                             (0x48UL)
        #define HWRM_VNIC_PLCMODES_QCFG                    (0x49UL)
+       #define HWRM_VNIC_QCAPS                            (0x4aUL)
        #define HWRM_RING_ALLOC                            (0x50UL)
        #define HWRM_RING_FREE                                     (0x51UL)
        #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS                 (0x52UL)
@@ -581,13 +582,15 @@ struct cmd_nums {
        #define HWRM_RING_RESET                            (0x5eUL)
        #define HWRM_RING_GRP_ALLOC                                (0x60UL)
        #define HWRM_RING_GRP_FREE                                 (0x61UL)
+       #define RESERVED5                                          (0x64UL)
+       #define RESERVED6                                          (0x65UL)
        #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC                     (0x70UL)
        #define HWRM_VNIC_RSS_COS_LB_CTX_FREE                      (0x71UL)
        #define HWRM_CFA_L2_FILTER_ALLOC                           (0x90UL)
        #define HWRM_CFA_L2_FILTER_FREE                    (0x91UL)
        #define HWRM_CFA_L2_FILTER_CFG                             (0x92UL)
        #define HWRM_CFA_L2_SET_RX_MASK                    (0x93UL)
-       #define RESERVED3                                          (0x94UL)
+       #define RESERVED4                                          (0x94UL)
        #define HWRM_CFA_TUNNEL_FILTER_ALLOC                       (0x95UL)
        #define HWRM_CFA_TUNNEL_FILTER_FREE                        (0x96UL)
        #define HWRM_CFA_ENCAP_RECORD_ALLOC                        (0x97UL)
@@ -607,6 +610,8 @@ struct cmd_nums {
        #define HWRM_STAT_CTX_CLR_STATS                    (0xb3UL)
        #define HWRM_FW_RESET                                      (0xc0UL)
        #define HWRM_FW_QSTATUS                            (0xc1UL)
+       #define HWRM_FW_SET_TIME                                   (0xc8UL)
+       #define HWRM_FW_GET_TIME                                   (0xc9UL)
        #define HWRM_EXEC_FWD_RESP                                 (0xd0UL)
        #define HWRM_REJECT_FWD_RESP                               (0xd1UL)
        #define HWRM_FWD_RESP                                      (0xd2UL)
@@ -615,11 +620,13 @@ struct cmd_nums {
        #define HWRM_WOL_FILTER_ALLOC                              (0xf0UL)
        #define HWRM_WOL_FILTER_FREE                               (0xf1UL)
        #define HWRM_WOL_FILTER_QCFG                               (0xf2UL)
+       #define HWRM_WOL_REASON_QCFG                               (0xf3UL)
        #define HWRM_DBG_READ_DIRECT                               (0xff10UL)
        #define HWRM_DBG_READ_INDIRECT                             (0xff11UL)
        #define HWRM_DBG_WRITE_DIRECT                              (0xff12UL)
        #define HWRM_DBG_WRITE_INDIRECT                    (0xff13UL)
        #define HWRM_DBG_DUMP                                      (0xff14UL)
+       #define HWRM_NVM_INSTALL_UPDATE                    (0xfff3UL)
        #define HWRM_NVM_MODIFY                            (0xfff4UL)
        #define HWRM_NVM_VERIFY_UPDATE                             (0xfff5UL)
        #define HWRM_NVM_GET_DEV_INFO                              (0xfff6UL)
@@ -824,7 +831,9 @@ struct hwrm_ver_get_output {
        u8 netctrl_fw_min;
        u8 netctrl_fw_bld;
        u8 netctrl_fw_rsvd;
-       __le32 reserved1;
+       __le32 dev_caps_cfg;
+       #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED  0x1UL
+       #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED  0x2UL
        u8 roce_fw_maj;
        u8 roce_fw_min;
        u8 roce_fw_bld;
@@ -839,9 +848,9 @@ struct hwrm_ver_get_output {
        u8 chip_metal;
        u8 chip_bond_id;
        u8 chip_platform_type;
-       #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC               (0x0UL << 0)
-       #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA               (0x1UL << 0)
-       #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM          (0x2UL << 0)
+       #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC               0x0UL
+       #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA               0x1UL
+       #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM          0x2UL
        __le16 max_req_win_len;
        __le16 max_resp_len;
        __le16 def_req_timeout;
@@ -863,10 +872,10 @@ struct hwrm_func_reset_input {
        #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID                  0x1UL
        __le16 vf_id;
        u8 func_reset_level;
-       #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL           (0x0UL << 0)
-       #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME    (0x1UL << 0)
-       #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN     (0x2UL << 0)
-       #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF    (0x3UL << 0)
+       #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL           0x0UL
+       #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME    0x1UL
+       #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN     0x2UL
+       #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF    0x3UL
        u8 unused_0;
 };
 
@@ -1028,6 +1037,10 @@ struct hwrm_func_qcaps_output {
        #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED     0x10UL
        #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED       0x20UL
        #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED     0x40UL
+       #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED          0x80UL
+       #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED           0x100UL
+       #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED      0x200UL
+       #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED           0x400UL
        u8 mac_address[6];
        __le16 max_rsscos_ctx;
        __le16 max_cmpl_rings;
@@ -1047,9 +1060,8 @@ struct hwrm_func_qcaps_output {
        __le32 max_mcast_filters;
        __le32 max_flow_id;
        __le32 max_hw_ring_grps;
+       __le16 max_sp_tx_rings;
        u8 unused_0;
-       u8 unused_1;
-       u8 unused_2;
        u8 valid;
 };
 
@@ -1077,6 +1089,7 @@ struct hwrm_func_qcfg_output {
        __le16 flags;
        #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED      0x1UL
        #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED            0x2UL
+       #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED          0x4UL
        u8 mac_address[6];
        __le16 pci_id;
        __le16 alloc_rsscos_ctx;
@@ -1089,29 +1102,46 @@ struct hwrm_func_qcfg_output {
        __le16 mru;
        __le16 stat_ctx_id;
        u8 port_partition_type;
-       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF             (0x0UL << 0)
-       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS    (0x1UL << 0)
-       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0         (0x2UL << 0)
-       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5         (0x3UL << 0)
-       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0         (0x4UL << 0)
-       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN         (0xffUL << 0)
+       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF             0x0UL
+       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS    0x1UL
+       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0         0x2UL
+       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5         0x3UL
+       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0         0x4UL
+       #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN         0xffUL
        u8 unused_0;
        __le16 dflt_vnic_id;
        u8 unused_1;
        u8 unused_2;
        __le32 min_bw;
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK                 0xfffffffUL
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT                  0
+       #define FUNC_QCFG_RESP_MIN_BW_RSVD                          0x10000000UL
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK            0xe0000000UL
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT     29
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MBPS           (0x0UL << 29)
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST    FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 max_bw;
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK                 0xfffffffUL
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT                  0
+       #define FUNC_QCFG_RESP_MAX_BW_RSVD                          0x10000000UL
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK            0xe0000000UL
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT     29
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MBPS           (0x0UL << 29)
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST    FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
        u8 evb_mode;
-       #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB                     (0x0UL << 0)
-       #define FUNC_QCFG_RESP_EVB_MODE_VEB                        (0x1UL << 0)
-       #define FUNC_QCFG_RESP_EVB_MODE_VEPA                       (0x2UL << 0)
+       #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB                     0x0UL
+       #define FUNC_QCFG_RESP_EVB_MODE_VEB                        0x1UL
+       #define FUNC_QCFG_RESP_EVB_MODE_VEPA                       0x2UL
        u8 unused_3;
-       __le16 unused_4;
+       __le16 alloc_vfs;
        __le32 alloc_mcast_filters;
        __le32 alloc_hw_ring_grps;
-       u8 unused_5;
-       u8 unused_6;
-       u8 unused_7;
+       __le16 alloc_sp_tx_rings;
+       u8 unused_4;
        u8 valid;
 };
 
@@ -1171,18 +1201,36 @@ struct hwrm_func_cfg_input {
        __le16 dflt_vlan;
        __be32 dflt_ip_addr[4];
        __le32 min_bw;
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK                   0xfffffffUL
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT                    0
+       #define FUNC_CFG_REQ_MIN_BW_RSVD                            0x10000000UL
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK              0xe0000000UL
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT               29
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MBPS             (0x0UL << 29)
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100    (0x1UL << 29)
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID          (0x7UL << 29)
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST    FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 max_bw;
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK                   0xfffffffUL
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT                    0
+       #define FUNC_CFG_REQ_MAX_BW_RSVD                            0x10000000UL
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK              0xe0000000UL
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT               29
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MBPS             (0x0UL << 29)
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100    (0x1UL << 29)
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID          (0x7UL << 29)
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST    FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
        __le16 async_event_cr;
        u8 vlan_antispoof_mode;
-       #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK           (0x0UL << 0)
-       #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN    (0x1UL << 0)
-       #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0)
-       #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+       #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK           0x0UL
+       #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN    0x1UL
+       #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+       #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
        u8 allowed_vlan_pris;
        u8 evb_mode;
-       #define FUNC_CFG_REQ_EVB_MODE_NO_EVB                       (0x0UL << 0)
-       #define FUNC_CFG_REQ_EVB_MODE_VEB                          (0x1UL << 0)
-       #define FUNC_CFG_REQ_EVB_MODE_VEPA                         (0x2UL << 0)
+       #define FUNC_CFG_REQ_EVB_MODE_NO_EVB                       0x0UL
+       #define FUNC_CFG_REQ_EVB_MODE_VEB                          0x1UL
+       #define FUNC_CFG_REQ_EVB_MODE_VEPA                         0x2UL
        u8 unused_2;
        __le16 num_mcast_filters;
 };
@@ -1341,16 +1389,16 @@ struct hwrm_func_drv_rgtr_input {
        #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD                0x8UL
        #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD           0x10UL
        __le16 os_type;
-       #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN                  (0x0UL << 0)
-       #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER            (0x1UL << 0)
-       #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS            (0xeUL << 0)
-       #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS                  (0x12UL << 0)
-       #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS                  (0x1dUL << 0)
-       #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX            (0x24UL << 0)
-       #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD                  (0x2aUL << 0)
-       #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI                     (0x68UL << 0)
-       #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864                   (0x73UL << 0)
-       #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2                (0x74UL << 0)
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN                  0x0UL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER            0x1UL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS            0xeUL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS                  0x12UL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS                  0x1dUL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX            0x24UL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD                  0x2aUL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI                     0x68UL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864                   0x73UL
+       #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2                0x74UL
        u8 ver_maj;
        u8 ver_min;
        u8 ver_upd;
@@ -1415,13 +1463,13 @@ struct hwrm_func_buf_rgtr_input {
        __le16 vf_id;
        __le16 req_buf_num_pages;
        __le16 req_buf_page_size;
-       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B    (0x4UL << 0)
-       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K             (0xcUL << 0)
-       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K             (0xdUL << 0)
-       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K    (0x10UL << 0)
-       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M             (0x15UL << 0)
-       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M             (0x16UL << 0)
-       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G             (0x1eUL << 0)
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B    0x4UL
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K             0xcUL
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K             0xdUL
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K    0x10UL
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M             0x15UL
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M             0x16UL
+       #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G             0x1eUL
        __le16 req_buf_len;
        __le16 resp_buf_len;
        u8 unused_0;
@@ -1473,16 +1521,16 @@ struct hwrm_func_drv_qver_output {
        __le16 seq_id;
        __le16 resp_len;
        __le16 os_type;
-       #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN                 (0x0UL << 0)
-       #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER                   (0x1UL << 0)
-       #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS                   (0xeUL << 0)
-       #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS                 (0x12UL << 0)
-       #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS                 (0x1dUL << 0)
-       #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX                   (0x24UL << 0)
-       #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD                 (0x2aUL << 0)
-       #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI            (0x68UL << 0)
-       #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864                  (0x73UL << 0)
-       #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2               (0x74UL << 0)
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN                 0x0UL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER                   0x1UL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS                   0xeUL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS                 0x12UL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS                 0x1dUL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX                   0x24UL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD                 0x2aUL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI            0x68UL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864                  0x73UL
+       #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2               0x74UL
        u8 ver_maj;
        u8 ver_min;
        u8 ver_upd;
@@ -1528,44 +1576,44 @@ struct hwrm_port_phy_cfg_input {
        #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER               0x400UL
        __le16 port_id;
        __le16 force_link_speed;
-       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB    (0x1UL << 0)
-       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB              (0xaUL << 0)
-       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB              (0x14UL << 0)
-       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB    (0x19UL << 0)
-       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB             (0x64UL << 0)
-       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB             (0xc8UL << 0)
-       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB             (0xfaUL << 0)
-       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB             (0x190UL << 0)
-       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB             (0x1f4UL << 0)
-       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB    (0x3e8UL << 0)
-       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB             (0xffffUL << 0)
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB    0x1UL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB              0xaUL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB              0x14UL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB    0x19UL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB             0x64UL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB             0xc8UL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB             0xfaUL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB             0x190UL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB             0x1f4UL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB    0x3e8UL
+       #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB             0xffffUL
        u8 auto_mode;
-       #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE            (0x0UL << 0)
-       #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS              (0x1UL << 0)
-       #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED               (0x2UL << 0)
-       #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW    (0x3UL << 0)
-       #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK              (0x4UL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE            0x0UL
+       #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS              0x1UL
+       #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED               0x2UL
+       #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW    0x3UL
+       #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK              0x4UL
        u8 auto_duplex;
-       #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF                  (0x0UL << 0)
-       #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL                  (0x1UL << 0)
-       #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH                  (0x2UL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF                  0x0UL
+       #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL                  0x1UL
+       #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH                  0x2UL
        u8 auto_pause;
        #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX                      0x1UL
        #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX                      0x2UL
        #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE           0x4UL
        u8 unused_0;
        __le16 auto_link_speed;
-       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB             (0x1UL << 0)
-       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB               (0xaUL << 0)
-       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB               (0x14UL << 0)
-       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB             (0x19UL << 0)
-       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB              (0x64UL << 0)
-       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB              (0xc8UL << 0)
-       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB              (0xfaUL << 0)
-       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB              (0x190UL << 0)
-       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB              (0x1f4UL << 0)
-       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB             (0x3e8UL << 0)
-       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB              (0xffffUL << 0)
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB             0x1UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB               0xaUL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB               0x14UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB             0x19UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB              0x64UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB              0xc8UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB              0xfaUL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB              0x190UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB              0x1f4UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB             0x3e8UL
+       #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB              0xffffUL
        __le16 auto_link_speed_mask;
        #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD      0x1UL
        #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB         0x2UL
@@ -1582,12 +1630,12 @@ struct hwrm_port_phy_cfg_input {
        #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD       0x1000UL
        #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB          0x2000UL
        u8 wirespeed;
-       #define PORT_PHY_CFG_REQ_WIRESPEED_OFF                     (0x0UL << 0)
-       #define PORT_PHY_CFG_REQ_WIRESPEED_ON                      (0x1UL << 0)
+       #define PORT_PHY_CFG_REQ_WIRESPEED_OFF                     0x0UL
+       #define PORT_PHY_CFG_REQ_WIRESPEED_ON                      0x1UL
        u8 lpbk;
-       #define PORT_PHY_CFG_REQ_LPBK_NONE                         (0x0UL << 0)
-       #define PORT_PHY_CFG_REQ_LPBK_LOCAL                        (0x1UL << 0)
-       #define PORT_PHY_CFG_REQ_LPBK_REMOTE                       (0x2UL << 0)
+       #define PORT_PHY_CFG_REQ_LPBK_NONE                         0x0UL
+       #define PORT_PHY_CFG_REQ_LPBK_LOCAL                        0x1UL
+       #define PORT_PHY_CFG_REQ_LPBK_REMOTE                       0x2UL
        u8 force_pause;
        #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX             0x1UL
        #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX             0x2UL
@@ -1641,25 +1689,25 @@ struct hwrm_port_phy_qcfg_output {
        __le16 seq_id;
        __le16 resp_len;
        u8 link;
-       #define PORT_PHY_QCFG_RESP_LINK_NO_LINK            (0x0UL << 0)
-       #define PORT_PHY_QCFG_RESP_LINK_SIGNAL                     (0x1UL << 0)
-       #define PORT_PHY_QCFG_RESP_LINK_LINK                       (0x2UL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_NO_LINK            0x0UL
+       #define PORT_PHY_QCFG_RESP_LINK_SIGNAL                     0x1UL
+       #define PORT_PHY_QCFG_RESP_LINK_LINK                       0x2UL
        u8 unused_0;
        __le16 link_speed;
-       #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB                (0x1UL << 0)
-       #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB                  (0xaUL << 0)
-       #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB                  (0x14UL << 0)
-       #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB                (0x19UL << 0)
-       #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB                 (0x64UL << 0)
-       #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB                 (0xc8UL << 0)
-       #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB                 (0xfaUL << 0)
-       #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB                 (0x190UL << 0)
-       #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB                 (0x1f4UL << 0)
-       #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB                (0x3e8UL << 0)
-       #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB                 (0xffffUL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB                0x1UL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB                  0xaUL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB                  0x14UL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB                0x19UL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB                 0x64UL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB                 0xc8UL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB                 0xfaUL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB                 0x190UL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB                 0x1f4UL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB                0x3e8UL
+       #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB                 0xffffUL
        u8 duplex;
-       #define PORT_PHY_QCFG_RESP_DUPLEX_HALF                     (0x0UL << 0)
-       #define PORT_PHY_QCFG_RESP_DUPLEX_FULL                     (0x1UL << 0)
+       #define PORT_PHY_QCFG_RESP_DUPLEX_HALF                     0x0UL
+       #define PORT_PHY_QCFG_RESP_DUPLEX_FULL                     0x1UL
        u8 pause;
        #define PORT_PHY_QCFG_RESP_PAUSE_TX                         0x1UL
        #define PORT_PHY_QCFG_RESP_PAUSE_RX                         0x2UL
@@ -1679,39 +1727,39 @@ struct hwrm_port_phy_qcfg_output {
        #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD            0x1000UL
        #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB              0x2000UL
        __le16 force_link_speed;
-       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB          (0x1UL << 0)
-       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB    (0xaUL << 0)
-       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB    (0x14UL << 0)
-       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB          (0x19UL << 0)
-       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB           (0x64UL << 0)
-       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB           (0xc8UL << 0)
-       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB           (0xfaUL << 0)
-       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB           (0x190UL << 0)
-       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB           (0x1f4UL << 0)
-       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB          (0x3e8UL << 0)
-       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB           (0xffffUL << 0)
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB          0x1UL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB    0xaUL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB    0x14UL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB          0x19UL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB           0x64UL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB           0xc8UL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB           0xfaUL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB           0x190UL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB           0x1f4UL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB          0x3e8UL
+       #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB           0xffffUL
        u8 auto_mode;
-       #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE                  (0x0UL << 0)
-       #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS    (0x1UL << 0)
-       #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED             (0x2UL << 0)
-       #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW          (0x3UL << 0)
-       #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK    (0x4UL << 0)
+       #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE                  0x0UL
+       #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS    0x1UL
+       #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED             0x2UL
+       #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW          0x3UL
+       #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK    0x4UL
        u8 auto_pause;
        #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX                    0x1UL
        #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX                    0x2UL
        #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE         0x4UL
        __le16 auto_link_speed;
-       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB           (0x1UL << 0)
-       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB             (0xaUL << 0)
-       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB             (0x14UL << 0)
-       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB           (0x19UL << 0)
-       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB    (0x64UL << 0)
-       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB    (0xc8UL << 0)
-       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB    (0xfaUL << 0)
-       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB    (0x190UL << 0)
-       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB    (0x1f4UL << 0)
-       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB           (0x3e8UL << 0)
-       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB    (0xffffUL << 0)
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB           0x1UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB             0xaUL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB             0x14UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB           0x19UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB    0x64UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB    0xc8UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB    0xfaUL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB    0x190UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB    0x1f4UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB           0x3e8UL
+       #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB    0xffffUL
        __le16 auto_link_speed_mask;
        #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD    0x1UL
        #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB      0x2UL
@@ -1728,46 +1776,46 @@ struct hwrm_port_phy_qcfg_output {
        #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD     0x1000UL
        #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB       0x2000UL
        u8 wirespeed;
-       #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF                   (0x0UL << 0)
-       #define PORT_PHY_QCFG_RESP_WIRESPEED_ON            (0x1UL << 0)
+       #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF                   0x0UL
+       #define PORT_PHY_QCFG_RESP_WIRESPEED_ON            0x1UL
        u8 lpbk;
-       #define PORT_PHY_QCFG_RESP_LPBK_NONE                       (0x0UL << 0)
-       #define PORT_PHY_QCFG_RESP_LPBK_LOCAL                      (0x1UL << 0)
-       #define PORT_PHY_QCFG_RESP_LPBK_REMOTE                     (0x2UL << 0)
+       #define PORT_PHY_QCFG_RESP_LPBK_NONE                       0x0UL
+       #define PORT_PHY_QCFG_RESP_LPBK_LOCAL                      0x1UL
+       #define PORT_PHY_QCFG_RESP_LPBK_REMOTE                     0x2UL
        u8 force_pause;
        #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX                   0x1UL
        #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX                   0x2UL
        u8 module_status;
-       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE              (0x0UL << 0)
-       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX         (0x1UL << 0)
-       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG       (0x2UL << 0)
-       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN           (0x3UL << 0)
-       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED      (0x4UL << 0)
-       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE    (0xffUL << 0)
+       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE              0x0UL
+       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX         0x1UL
+       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG       0x2UL
+       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN           0x3UL
+       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED      0x4UL
+       #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE    0xffUL
        __le32 preemphasis;
        u8 phy_maj;
        u8 phy_min;
        u8 phy_bld;
        u8 phy_type;
-       #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN                (0x0UL << 0)
-       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR                 (0x1UL << 0)
-       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4                (0x2UL << 0)
-       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR                 (0x3UL << 0)
-       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR                 (0x4UL << 0)
-       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2                (0x5UL << 0)
-       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX                 (0x6UL << 0)
-       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR                 (0x7UL << 0)
-       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET                  (0x8UL << 0)
-       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE                 (0x9UL << 0)
-       #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY    (0xaUL << 0)
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN                0x0UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR                 0x1UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4                0x2UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR                 0x3UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR                 0x4UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2                0x5UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX                 0x6UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR                 0x7UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET                  0x8UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE                 0x9UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY    0xaUL
        u8 media_type;
-       #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN              (0x0UL << 0)
-       #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP                   (0x1UL << 0)
-       #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC                  (0x2UL << 0)
-       #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE                (0x3UL << 0)
+       #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN              0x0UL
+       #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP                   0x1UL
+       #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC                  0x2UL
+       #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE                0x3UL
        u8 xcvr_pkg_type;
-       #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL    (0x1UL << 0)
-       #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL    (0x2UL << 0)
+       #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL    0x1UL
+       #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL    0x2UL
        u8 eee_config_phy_addr;
        #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK                    0x1fUL
        #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT             0
@@ -1796,11 +1844,11 @@ struct hwrm_port_phy_qcfg_output {
        #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD  0x1000UL
        #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB    0x2000UL
        u8 link_partner_adv_auto_mode;
-       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
-       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
-       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
-       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
-       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK (0x4UL << 0)
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
+       #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
        u8 link_partner_adv_pause;
        #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX       0x1UL
        #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX       0x2UL
@@ -1859,7 +1907,7 @@ struct hwrm_port_mac_cfg_input {
        __le64 resp_addr;
        __le32 flags;
        #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK                   0x1UL
-       #define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE       0x2UL
+       #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE          0x2UL
        #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE       0x4UL
        #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE           0x8UL
        #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE    0x10UL
@@ -1868,28 +1916,50 @@ struct hwrm_port_mac_cfg_input {
        #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE   0x80UL
        #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE               0x100UL
        #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE              0x200UL
+       #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE         0x400UL
+       #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE      0x800UL
+       #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE          0x1000UL
        __le32 enables;
        #define PORT_MAC_CFG_REQ_ENABLES_IPG                        0x1UL
        #define PORT_MAC_CFG_REQ_ENABLES_LPBK                       0x2UL
-       #define PORT_MAC_CFG_REQ_ENABLES_IVLAN_PRI2COS_MAP_PRI     0x4UL
-       #define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI               0x8UL
+       #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI      0x4UL
+       #define PORT_MAC_CFG_REQ_ENABLES_RESERVED1                  0x8UL
        #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI    0x10UL
        #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI           0x20UL
        #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
        #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+       #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG              0x100UL
        __le16 port_id;
        u8 ipg;
        u8 lpbk;
-       #define PORT_MAC_CFG_REQ_LPBK_NONE                         (0x0UL << 0)
-       #define PORT_MAC_CFG_REQ_LPBK_LOCAL                        (0x1UL << 0)
-       #define PORT_MAC_CFG_REQ_LPBK_REMOTE                       (0x2UL << 0)
-       u8 ivlan_pri2cos_map_pri;
-       u8 lcos_map_pri;
+       #define PORT_MAC_CFG_REQ_LPBK_NONE                         0x0UL
+       #define PORT_MAC_CFG_REQ_LPBK_LOCAL                        0x1UL
+       #define PORT_MAC_CFG_REQ_LPBK_REMOTE                       0x2UL
+       u8 vlan_pri2cos_map_pri;
+       u8 reserved1;
        u8 tunnel_pri2cos_map_pri;
        u8 dscp2pri_map_pri;
        __le16 rx_ts_capture_ptp_msg_type;
        __le16 tx_ts_capture_ptp_msg_type;
-       __le32 unused_0;
+       u8 cos_field_cfg;
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1                0x1UL
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK   0x6UL
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT    1
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST    PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT  3
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST    PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK    0xe0UL
+       #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT     5
+       u8 unused_0[3];
 };
 
 /* Output (16 bytes) */
@@ -1902,9 +1972,9 @@ struct hwrm_port_mac_cfg_output {
        __le16 mtu;
        u8 ipg;
        u8 lpbk;
-       #define PORT_MAC_CFG_RESP_LPBK_NONE                        (0x0UL << 0)
-       #define PORT_MAC_CFG_RESP_LPBK_LOCAL                       (0x1UL << 0)
-       #define PORT_MAC_CFG_RESP_LPBK_REMOTE                      (0x2UL << 0)
+       #define PORT_MAC_CFG_RESP_LPBK_NONE                        0x0UL
+       #define PORT_MAC_CFG_RESP_LPBK_LOCAL                       0x1UL
+       #define PORT_MAC_CFG_RESP_LPBK_REMOTE                      0x2UL
        u8 unused_0;
        u8 valid;
 };
@@ -2163,8 +2233,8 @@ struct hwrm_queue_qportcfg_input {
        __le64 resp_addr;
        __le32 flags;
        #define QUEUE_QPORTCFG_REQ_FLAGS_PATH                       0x1UL
-       #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX                   (0x0UL << 0)
-       #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX                   (0x1UL << 0)
+       #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX                   0x0UL
+       #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX                   0x1UL
        #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST    QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
        __le16 port_id;
        __le16 unused_0;
@@ -2179,50 +2249,51 @@ struct hwrm_queue_qportcfg_output {
        u8 max_configurable_queues;
        u8 max_configurable_lossless_queues;
        u8 queue_cfg_allowed;
-       u8 queue_buffers_cfg_allowed;
+       u8 queue_cfg_info;
+       #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG         0x1UL
        u8 queue_pfcenable_cfg_allowed;
        u8 queue_pri2cos_cfg_allowed;
        u8 queue_cos2bw_cfg_allowed;
        u8 queue_id0;
        u8 queue_id0_service_profile;
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY (0x0UL << 0)
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
        u8 queue_id1;
        u8 queue_id1_service_profile;
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY (0x0UL << 0)
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
        u8 queue_id2;
        u8 queue_id2_service_profile;
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY (0x0UL << 0)
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
        u8 queue_id3;
        u8 queue_id3_service_profile;
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY (0x0UL << 0)
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
        u8 queue_id4;
        u8 queue_id4_service_profile;
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY (0x0UL << 0)
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
        u8 queue_id5;
        u8 queue_id5_service_profile;
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY (0x0UL << 0)
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
        u8 queue_id6;
        u8 queue_id6_service_profile;
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY (0x0UL << 0)
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
        u8 queue_id7;
        u8 queue_id7_service_profile;
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY (0x0UL << 0)
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
-       #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
+       #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
        u8 valid;
 };
 
@@ -2235,19 +2306,21 @@ struct hwrm_queue_cfg_input {
        __le16 target_id;
        __le64 resp_addr;
        __le32 flags;
-       #define QUEUE_CFG_REQ_FLAGS_PATH                            0x1UL
-       #define QUEUE_CFG_REQ_FLAGS_PATH_TX                        (0x0UL << 0)
-       #define QUEUE_CFG_REQ_FLAGS_PATH_RX                        (0x1UL << 0)
-       #define QUEUE_CFG_REQ_FLAGS_PATH_LAST    QUEUE_CFG_REQ_FLAGS_PATH_RX
+       #define QUEUE_CFG_REQ_FLAGS_PATH_MASK                       0x3UL
+       #define QUEUE_CFG_REQ_FLAGS_PATH_SFT                        0
+       #define QUEUE_CFG_REQ_FLAGS_PATH_TX                        0x0UL
+       #define QUEUE_CFG_REQ_FLAGS_PATH_RX                        0x1UL
+       #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR                     0x2UL
+       #define QUEUE_CFG_REQ_FLAGS_PATH_LAST    QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
        __le32 enables;
        #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN                      0x1UL
        #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE               0x2UL
        __le32 queue_id;
        __le32 dflt_len;
        u8 service_profile;
-       #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY                (0x0UL << 0)
-       #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS             (0x1UL << 0)
-       #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN              (0xffUL << 0)
+       #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY                0x0UL
+       #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS             0x1UL
+       #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN              0xffUL
        u8 unused_0[7];
 };
 
@@ -2264,50 +2337,6 @@ struct hwrm_queue_cfg_output {
        u8 valid;
 };
 
-/* hwrm_queue_buffers_cfg */
-/* Input (56 bytes) */
-struct hwrm_queue_buffers_cfg_input {
-       __le16 req_type;
-       __le16 cmpl_ring;
-       __le16 seq_id;
-       __le16 target_id;
-       __le64 resp_addr;
-       __le32 flags;
-       #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH                    0x1UL
-       #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX                (0x0UL << 0)
-       #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX                (0x1UL << 0)
-       #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_LAST    QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX
-       __le32 enables;
-       #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED              0x1UL
-       #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED                0x2UL
-       #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF                  0x4UL
-       #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON                   0x8UL
-       #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL                  0x10UL
-       #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL               0x20UL
-       #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX                   0x40UL
-       __le32 queue_id;
-       __le32 reserved;
-       __le32 shared;
-       __le32 xoff;
-       __le32 xon;
-       __le32 full;
-       __le32 notfull;
-       __le32 max;
-};
-
-/* Output (16 bytes) */
-struct hwrm_queue_buffers_cfg_output {
-       __le16 error_code;
-       __le16 req_type;
-       __le16 seq_id;
-       __le16 resp_len;
-       __le32 unused_0;
-       u8 unused_1;
-       u8 unused_2;
-       u8 unused_3;
-       u8 valid;
-};
-
 /* hwrm_queue_pfcenable_cfg */
 /* Input (24 bytes) */
 struct hwrm_queue_pfcenable_cfg_input {
@@ -2351,12 +2380,22 @@ struct hwrm_queue_pri2cos_cfg_input {
        __le16 target_id;
        __le64 resp_addr;
        __le32 flags;
-       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH                    0x1UL
+       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK               0x3UL
+       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT                0
        #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX                (0x0UL << 0)
        #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX                (0x1UL << 0)
-       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST    QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX
-       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN                   0x2UL
+       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR             (0x2UL << 0)
+       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST    QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+       #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN                   0x4UL
        __le32 enables;
+       #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID    0x1UL
+       #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID    0x2UL
+       #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID    0x4UL
+       #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID    0x8UL
+       #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID    0x10UL
+       #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID    0x20UL
+       #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID    0x40UL
+       #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID    0x80UL
        u8 port_id;
        u8 pri0_cos_queue_id;
        u8 pri1_cos_queue_id;
@@ -2404,82 +2443,226 @@ struct hwrm_queue_cos2bw_cfg_input {
        u8 queue_id0;
        u8 unused_0;
        __le32 queue_id0_min_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id0_max_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
        u8 queue_id0_tsa_assign;
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP      (0x0UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS     (0x1UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP      0x0UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS     0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
        u8 queue_id0_pri_lvl;
        u8 queue_id0_bw_weight;
        u8 queue_id1;
        __le32 queue_id1_min_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id1_max_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
        u8 queue_id1_tsa_assign;
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP      (0x0UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS     (0x1UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP      0x0UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS     0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
        u8 queue_id1_pri_lvl;
        u8 queue_id1_bw_weight;
        u8 queue_id2;
        __le32 queue_id2_min_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id2_max_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
        u8 queue_id2_tsa_assign;
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP      (0x0UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS     (0x1UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP      0x0UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS     0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
        u8 queue_id2_pri_lvl;
        u8 queue_id2_bw_weight;
        u8 queue_id3;
        __le32 queue_id3_min_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id3_max_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
        u8 queue_id3_tsa_assign;
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP      (0x0UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS     (0x1UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP      0x0UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS     0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
        u8 queue_id3_pri_lvl;
        u8 queue_id3_bw_weight;
        u8 queue_id4;
        __le32 queue_id4_min_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id4_max_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
        u8 queue_id4_tsa_assign;
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP      (0x0UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS     (0x1UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP      0x0UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS     0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
        u8 queue_id4_pri_lvl;
        u8 queue_id4_bw_weight;
        u8 queue_id5;
        __le32 queue_id5_min_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id5_max_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
        u8 queue_id5_tsa_assign;
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP      (0x0UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS     (0x1UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP      0x0UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS     0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
        u8 queue_id5_pri_lvl;
        u8 queue_id5_bw_weight;
        u8 queue_id6;
        __le32 queue_id6_min_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id6_max_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
        u8 queue_id6_tsa_assign;
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP      (0x0UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS     (0x1UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP      0x0UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS     0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
        u8 queue_id6_pri_lvl;
        u8 queue_id6_bw_weight;
        u8 queue_id7;
        __le32 queue_id7_min_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id7_max_bw;
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
        u8 queue_id7_tsa_assign;
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP      (0x0UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS     (0x1UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP      0x0UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS     0x1UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
        u8 queue_id7_pri_lvl;
        u8 queue_id7_bw_weight;
        u8 unused_1[5];
@@ -2563,6 +2746,7 @@ struct hwrm_vnic_cfg_input {
        #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE                    0x4UL
        #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE              0x8UL
        #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE              0x10UL
+       #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE                 0x20UL
        __le32 enables;
        #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP                  0x1UL
        #define VNIC_CFG_REQ_ENABLES_RSS_RULE                       0x2UL
@@ -2615,18 +2799,18 @@ struct hwrm_vnic_tpa_cfg_input {
        #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN                0x8UL
        __le16 vnic_id;
        __le16 max_agg_segs;
-       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1            (0x0UL << 0)
-       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2            (0x1UL << 0)
-       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4            (0x2UL << 0)
-       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8            (0x3UL << 0)
-       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX                  (0x1fUL << 0)
+       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1            0x0UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2            0x1UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4            0x2UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8            0x3UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX                  0x1fUL
        __le16 max_aggs;
-       #define VNIC_TPA_CFG_REQ_MAX_AGGS_1                        (0x0UL << 0)
-       #define VNIC_TPA_CFG_REQ_MAX_AGGS_2                        (0x1UL << 0)
-       #define VNIC_TPA_CFG_REQ_MAX_AGGS_4                        (0x2UL << 0)
-       #define VNIC_TPA_CFG_REQ_MAX_AGGS_8                        (0x3UL << 0)
-       #define VNIC_TPA_CFG_REQ_MAX_AGGS_16                       (0x4UL << 0)
-       #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX                      (0x7UL << 0)
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_1                        0x0UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_2                        0x1UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_4                        0x2UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_8                        0x3UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_16                       0x4UL
+       #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX                      0x7UL
        u8 unused_0;
        u8 unused_1;
        __le32 max_agg_timer;
@@ -2780,15 +2964,15 @@ struct hwrm_ring_alloc_input {
        __le64 resp_addr;
        __le32 enables;
        #define RING_ALLOC_REQ_ENABLES_RESERVED1                    0x1UL
-       #define RING_ALLOC_REQ_ENABLES_RESERVED2                    0x2UL
+       #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG                 0x2UL
        #define RING_ALLOC_REQ_ENABLES_RESERVED3                    0x4UL
        #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID            0x8UL
        #define RING_ALLOC_REQ_ENABLES_RESERVED4                    0x10UL
        #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID                 0x20UL
        u8 ring_type;
-       #define RING_ALLOC_REQ_RING_TYPE_CMPL                      (0x0UL << 0)
-       #define RING_ALLOC_REQ_RING_TYPE_TX                        (0x1UL << 0)
-       #define RING_ALLOC_REQ_RING_TYPE_RX                        (0x2UL << 0)
+       #define RING_ALLOC_REQ_RING_TYPE_CMPL                      0x0UL
+       #define RING_ALLOC_REQ_RING_TYPE_TX                        0x1UL
+       #define RING_ALLOC_REQ_RING_TYPE_RX                        0x2UL
        u8 unused_0;
        __le16 unused_1;
        __le64 page_tbl_addr;
@@ -2804,18 +2988,36 @@ struct hwrm_ring_alloc_input {
        u8 unused_4;
        u8 unused_5;
        __le32 reserved1;
-       __le16 reserved2;
+       __le16 ring_arb_cfg;
+       #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK         0xfUL
+       #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT          0
+       #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP          (0x1UL << 0)
+       #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ         (0x2UL << 0)
+       #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST    RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+       #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK               0xf0UL
+       #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT                4
+       #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK  0xff00UL
+       #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT   8
        u8 unused_6;
        u8 unused_7;
        __le32 reserved3;
        __le32 stat_ctx_id;
        __le32 reserved4;
        __le32 max_bw;
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK                 0xfffffffUL
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT                  0
+       #define RING_ALLOC_REQ_MAX_BW_RSVD                          0x10000000UL
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK            0xe0000000UL
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT     29
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MBPS           (0x0UL << 29)
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST    RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
        u8 int_mode;
-       #define RING_ALLOC_REQ_INT_MODE_LEGACY                     (0x0UL << 0)
-       #define RING_ALLOC_REQ_INT_MODE_RSVD                       (0x1UL << 0)
-       #define RING_ALLOC_REQ_INT_MODE_MSIX                       (0x2UL << 0)
-       #define RING_ALLOC_REQ_INT_MODE_POLL                       (0x3UL << 0)
+       #define RING_ALLOC_REQ_INT_MODE_LEGACY                     0x0UL
+       #define RING_ALLOC_REQ_INT_MODE_RSVD                       0x1UL
+       #define RING_ALLOC_REQ_INT_MODE_MSIX                       0x2UL
+       #define RING_ALLOC_REQ_INT_MODE_POLL                       0x3UL
        u8 unused_8[3];
 };
 
@@ -2842,9 +3044,9 @@ struct hwrm_ring_free_input {
        __le16 target_id;
        __le64 resp_addr;
        u8 ring_type;
-       #define RING_FREE_REQ_RING_TYPE_CMPL                       (0x0UL << 0)
-       #define RING_FREE_REQ_RING_TYPE_TX                         (0x1UL << 0)
-       #define RING_FREE_REQ_RING_TYPE_RX                         (0x2UL << 0)
+       #define RING_FREE_REQ_RING_TYPE_CMPL                       0x0UL
+       #define RING_FREE_REQ_RING_TYPE_TX                         0x1UL
+       #define RING_FREE_REQ_RING_TYPE_RX                         0x2UL
        u8 unused_0;
        __le16 ring_id;
        __le32 unused_1;
@@ -2942,9 +3144,9 @@ struct hwrm_ring_reset_input {
        __le16 target_id;
        __le64 resp_addr;
        u8 ring_type;
-       #define RING_RESET_REQ_RING_TYPE_CMPL                      (0x0UL << 0)
-       #define RING_RESET_REQ_RING_TYPE_TX                        (0x1UL << 0)
-       #define RING_RESET_REQ_RING_TYPE_RX                        (0x2UL << 0)
+       #define RING_RESET_REQ_RING_TYPE_CMPL                      0x0UL
+       #define RING_RESET_REQ_RING_TYPE_TX                        0x1UL
+       #define RING_RESET_REQ_RING_TYPE_RX                        0x2UL
        u8 unused_0;
        __le16 ring_id;
        __le32 unused_1;
@@ -3068,36 +3270,36 @@ struct hwrm_cfa_l2_filter_alloc_input {
        __le16 t_l2_ivlan;
        __le16 t_l2_ivlan_mask;
        u8 src_type;
-       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT             (0x0UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF                (0x1UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF                (0x2UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC              (0x3UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG              (0x4UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE               (0x5UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO              (0x6UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG              (0x7UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT             0x0UL
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF                0x1UL
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF                0x2UL
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC              0x3UL
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG              0x4UL
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE               0x5UL
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO              0x6UL
+       #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG              0x7UL
        u8 unused_6;
        __le32 src_id;
        u8 tunnel_type;
-       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL     (0x0UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN          (0x1UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE          (0x2UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE          (0x3UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP           (0x4UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE         (0x5UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS           (0x6UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT    (0x7UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE          (0x8UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL     (0xffUL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL     0x0UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN          0x1UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE          0x2UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE          0x3UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP           0x4UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE         0x5UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS           0x6UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT    0x7UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE          0x8UL
+       #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL     0xffUL
        u8 unused_7;
        __le16 dst_id;
        __le16 mirror_vnic_id;
        u8 pri_hint;
-       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER         (0x0UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER     (0x1UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER     (0x2UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX               (0x3UL << 0)
-       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN               (0x4UL << 0)
+       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER         0x0UL
+       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER     0x1UL
+       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER     0x2UL
+       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX               0x3UL
+       #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN               0x4UL
        u8 unused_8;
        __le32 unused_9;
        __le64 l2_filter_id_hint;
@@ -3246,16 +3448,16 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
        u8 l3_addr_type;
        u8 t_l3_addr_type;
        u8 tunnel_type;
-       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
-       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     (0x1UL << 0)
-       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     (0x2UL << 0)
-       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE     (0x3UL << 0)
-       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP      (0x4UL << 0)
-       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE    (0x5UL << 0)
-       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS      (0x6UL << 0)
-       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT       (0x7UL << 0)
-       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     (0x8UL << 0)
-       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     0x1UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     0x2UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE     0x3UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP      0x4UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE    0x5UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS      0x6UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT       0x7UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     0x8UL
+       #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
        u8 unused_0;
        __le32 vni;
        __le32 dst_vnic_id;
@@ -3311,14 +3513,14 @@ struct hwrm_cfa_encap_record_alloc_input {
        __le32 flags;
        #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK           0x1UL
        u8 encap_type;
-       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN       (0x1UL << 0)
-       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE       (0x2UL << 0)
-       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE       (0x3UL << 0)
-       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP         (0x4UL << 0)
-       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE      (0x5UL << 0)
-       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS         (0x6UL << 0)
-       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN         (0x7UL << 0)
-       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE       (0x8UL << 0)
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN       0x1UL
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE       0x2UL
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE       0x3UL
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP         0x4UL
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE      0x5UL
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS         0x6UL
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN         0x7UL
+       #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE       0x8UL
        u8 unused_0;
        __le16 unused_1;
        __le32 encap_data[16];
@@ -3397,32 +3599,32 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
        u8 src_macaddr[6];
        __be16 ethertype;
        u8 ip_addr_type;
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN  (0x0UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4     (0x4UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6     (0x6UL << 0)
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN  0x0UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4     0x4UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6     0x6UL
        u8 ip_protocol;
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN   (0x0UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP       (0x6UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP       (0x11UL << 0)
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN   0x0UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP       0x6UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP       0x11UL
        __le16 dst_id;
        __le16 mirror_vnic_id;
        u8 tunnel_type;
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     (0x1UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     (0x2UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE     (0x3UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP      (0x4UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE    (0x5UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS      (0x6UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT       (0x7UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     (0x8UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     0x1UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     0x2UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE     0x3UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP      0x4UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE    0x5UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS      0x6UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT       0x7UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     0x8UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
        u8 pri_hint;
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER    (0x0UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE         (0x1UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW         (0x2UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST      (0x3UL << 0)
-       #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST       (0x4UL << 0)
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER    0x0UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE         0x1UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW         0x2UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST      0x3UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST       0x4UL
        __be32 src_ipaddr[4];
        __be32 src_ipaddr_mask[4];
        __be32 dst_ipaddr[4];
@@ -3511,8 +3713,8 @@ struct hwrm_tunnel_dst_port_query_input {
        __le16 target_id;
        __le64 resp_addr;
        u8 tunnel_type;
-       #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN       (0x1UL << 0)
-       #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE      (0x5UL << 0)
+       #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN       0x1UL
+       #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE      0x5UL
        u8 unused_0[7];
 };
 
@@ -3539,8 +3741,8 @@ struct hwrm_tunnel_dst_port_alloc_input {
        __le16 target_id;
        __le64 resp_addr;
        u8 tunnel_type;
-       #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN       (0x1UL << 0)
-       #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE      (0x5UL << 0)
+       #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN       0x1UL
+       #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE      0x5UL
        u8 unused_0;
        __be16 tunnel_dst_port_val;
        __le32 unused_1;
@@ -3570,8 +3772,8 @@ struct hwrm_tunnel_dst_port_free_input {
        __le16 target_id;
        __le64 resp_addr;
        u8 tunnel_type;
-       #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN         (0x1UL << 0)
-       #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE       (0x5UL << 0)
+       #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN         0x1UL
+       #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE       0x5UL
        u8 unused_0;
        __le16 tunnel_dst_port_id;
        __le32 unused_1;
@@ -3720,15 +3922,15 @@ struct hwrm_fw_reset_input {
        __le16 target_id;
        __le64 resp_addr;
        u8 embedded_proc_type;
-       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT               (0x0UL << 0)
-       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT               (0x1UL << 0)
-       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL    (0x2UL << 0)
-       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE               (0x3UL << 0)
-       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD               (0x4UL << 0)
+       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT               0x0UL
+       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT               0x1UL
+       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL    0x2UL
+       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE               0x3UL
+       #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD               0x4UL
        u8 selfrst_status;
-       #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE    (0x0UL << 0)
-       #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP    (0x1UL << 0)
-       #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST         (0x2UL << 0)
+       #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE    0x0UL
+       #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP    0x1UL
+       #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST         0x2UL
        __le16 unused_0[3];
 };
 
@@ -3739,9 +3941,9 @@ struct hwrm_fw_reset_output {
        __le16 seq_id;
        __le16 resp_len;
        u8 selfrst_status;
-       #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE           (0x0UL << 0)
-       #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP           (0x1UL << 0)
-       #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST       (0x2UL << 0)
+       #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE           0x0UL
+       #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP           0x1UL
+       #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST       0x2UL
        u8 unused_0;
        __le16 unused_1;
        u8 unused_2;
@@ -3759,11 +3961,11 @@ struct hwrm_fw_qstatus_input {
        __le16 target_id;
        __le64 resp_addr;
        u8 embedded_proc_type;
-       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT             (0x0UL << 0)
-       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT             (0x1UL << 0)
-       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL          (0x2UL << 0)
-       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE             (0x3UL << 0)
-       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD             (0x4UL << 0)
+       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT             0x0UL
+       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT             0x1UL
+       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL          0x2UL
+       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE             0x3UL
+       #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD             0x4UL
        u8 unused_0[7];
 };
 
@@ -3774,9 +3976,9 @@ struct hwrm_fw_qstatus_output {
        __le16 seq_id;
        __le16 resp_len;
        u8 selfrst_status;
-       #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE         (0x0UL << 0)
-       #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP         (0x1UL << 0)
-       #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST     (0x2UL << 0)
+       #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE         0x0UL
+       #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP         0x1UL
+       #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST     0x2UL
        u8 unused_0;
        __le16 unused_1;
        u8 unused_2;
@@ -3785,6 +3987,42 @@ struct hwrm_fw_qstatus_output {
        u8 valid;
 };
 
+/* hwrm_fw_set_time */
+/* Input (32 bytes) */
+struct hwrm_fw_set_time_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 year;
+       #define FW_SET_TIME_REQ_YEAR_UNKNOWN                       0x0UL
+       u8 month;
+       u8 day;
+       u8 hour;
+       u8 minute;
+       u8 second;
+       u8 unused_0;
+       __le16 millisecond;
+       __le16 zone;
+       #define FW_SET_TIME_REQ_ZONE_UTC                           0x0UL
+       #define FW_SET_TIME_REQ_ZONE_UNKNOWN                       0xffffUL
+       __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_set_time_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
 /* hwrm_exec_fwd_resp */
 /* Input (128 bytes) */
 struct hwrm_exec_fwd_resp_input {
@@ -3921,32 +4159,6 @@ struct hwrm_temp_monitor_query_output {
        u8 valid;
 };
 
-/* hwrm_nvm_raw_write_blk */
-/* Input (32 bytes) */
-struct hwrm_nvm_raw_write_blk_input {
-       __le16 req_type;
-       __le16 cmpl_ring;
-       __le16 seq_id;
-       __le16 target_id;
-       __le64 resp_addr;
-       __le64 host_src_addr;
-       __le32 dest_addr;
-       __le32 len;
-};
-
-/* Output (16 bytes) */
-struct hwrm_nvm_raw_write_blk_output {
-       __le16 error_code;
-       __le16 req_type;
-       __le16 seq_id;
-       __le16 resp_len;
-       __le32 unused_0;
-       u8 unused_1;
-       u8 unused_2;
-       u8 unused_3;
-       u8 valid;
-};
-
 /* hwrm_nvm_read */
 /* Input (40 bytes) */
 struct hwrm_nvm_read_input {
@@ -4132,9 +4344,9 @@ struct hwrm_nvm_find_dir_entry_input {
        u8 opt_ordinal;
        #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK     0x3UL
        #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT              0
-       #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ              (0x0UL << 0)
-       #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE              (0x1UL << 0)
-       #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT              (0x2UL << 0)
+       #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ              0x0UL
+       #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE              0x1UL
+       #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT              0x2UL
        u8 unused_1[3];
 };
 
@@ -4266,4 +4478,41 @@ struct hwrm_nvm_verify_update_output {
        u8 valid;
 };
 
+/* hwrm_nvm_install_update */
+/* Input (24 bytes) */
+struct hwrm_nvm_install_update_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 install_type;
+       #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL         0x0UL
+       #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL    0xffffffffUL
+       __le32 unused_0;
+};
+
+/* Output (24 bytes) */
+struct hwrm_nvm_install_update_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le64 installed_items;
+       u8 result;
+       #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS             0x0UL
+       u8 problem_item;
+       #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE          0x0UL
+       #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE      0xffUL
+       u8 reset_required;
+       #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE       0x0UL
+       #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI         0x1UL
+       #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER      0x2UL
+       u8 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
 #endif
index 50d2007..8be7185 100644 (file)
 #include "bnxt_ethtool.h"
 
 #ifdef CONFIG_BNXT_SRIOV
+static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
+                                         struct bnxt_vf_info *vf, u16 event_id)
+{
+       struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_fwd_async_event_cmpl_input req = {0};
+       struct hwrm_async_event_cmpl *async_cmpl;
+       int rc = 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+       if (vf)
+               req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+       else
+               /* broadcast this async event to all VFs */
+               req.encap_async_event_target_id = cpu_to_le16(0xffff);
+       async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+       async_cmpl->type =
+               cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+       async_cmpl->event_id = cpu_to_le16(event_id);
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+       if (rc) {
+               netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
+                          rc);
+               goto fwd_async_event_cmpl_exit;
+       }
+
+       if (resp->error_code) {
+               netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
+                          resp->error_code);
+               rc = -1;
+       }
+
+fwd_async_event_cmpl_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
 static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
 {
        if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
@@ -243,8 +282,9 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
                rc = -EINVAL;
                break;
        }
-       /* CHIMP TODO: send msg to VF to update new link state */
-
+       if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
+               rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
+                       HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
        return rc;
 }
 
@@ -525,46 +565,6 @@ err_out1:
        return rc;
 }
 
-static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
-                                         struct bnxt_vf_info *vf,
-                                         u16 event_id)
-{
-       int rc = 0;
-       struct hwrm_fwd_async_event_cmpl_input req = {0};
-       struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
-       struct hwrm_async_event_cmpl *async_cmpl;
-
-       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
-       if (vf)
-               req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
-       else
-               /* broadcast this async event to all VFs */
-               req.encap_async_event_target_id = cpu_to_le16(0xffff);
-       async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
-       async_cmpl->type =
-               cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
-       async_cmpl->event_id = cpu_to_le16(event_id);
-
-       mutex_lock(&bp->hwrm_cmd_lock);
-       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
-       if (rc) {
-               netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
-                          rc);
-               goto fwd_async_event_cmpl_exit;
-       }
-
-       if (resp->error_code) {
-               netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
-                          resp->error_code);
-               rc = -1;
-       }
-
-fwd_async_event_cmpl_exit:
-       mutex_unlock(&bp->hwrm_cmd_lock);
-       return rc;
-}
-
 void bnxt_sriov_disable(struct bnxt *bp)
 {
        u16 num_vfs = pci_num_vf(bp->pdev);
index 46f9043..2013474 100644 (file)
@@ -450,8 +450,8 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
                        genet_dma_ring_regs[r]);
 }
 
-static int bcmgenet_get_settings(struct net_device *dev,
-                                struct ethtool_cmd *cmd)
+static int bcmgenet_get_link_ksettings(struct net_device *dev,
+                                      struct ethtool_link_ksettings *cmd)
 {
        if (!netif_running(dev))
                return -EINVAL;
@@ -459,11 +459,11 @@ static int bcmgenet_get_settings(struct net_device *dev,
        if (!dev->phydev)
                return -ENODEV;
 
-       return phy_ethtool_gset(dev->phydev, cmd);
+       return phy_ethtool_ksettings_get(dev->phydev, cmd);
 }
 
-static int bcmgenet_set_settings(struct net_device *dev,
-                                struct ethtool_cmd *cmd)
+static int bcmgenet_set_link_ksettings(struct net_device *dev,
+                                      const struct ethtool_link_ksettings *cmd)
 {
        if (!netif_running(dev))
                return -EINVAL;
@@ -471,7 +471,7 @@ static int bcmgenet_set_settings(struct net_device *dev,
        if (!dev->phydev)
                return -ENODEV;
 
-       return phy_ethtool_sset(dev->phydev, cmd);
+       return phy_ethtool_ksettings_set(dev->phydev, cmd);
 }
 
 static int bcmgenet_set_rx_csum(struct net_device *dev,
@@ -977,8 +977,6 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
        .get_strings            = bcmgenet_get_strings,
        .get_sset_count         = bcmgenet_get_sset_count,
        .get_ethtool_stats      = bcmgenet_get_ethtool_stats,
-       .get_settings           = bcmgenet_get_settings,
-       .set_settings           = bcmgenet_set_settings,
        .get_drvinfo            = bcmgenet_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_msglevel           = bcmgenet_get_msglevel,
@@ -990,6 +988,8 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
        .nway_reset             = bcmgenet_nway_reset,
        .get_coalesce           = bcmgenet_get_coalesce,
        .set_coalesce           = bcmgenet_set_coalesce,
+       .get_link_ksettings     = bcmgenet_get_link_ksettings,
+       .set_link_ksettings     = bcmgenet_set_link_ksettings,
 };
 
 /* Power down the unimac, based on mode. */
index 15d02da..9cffe48 100644 (file)
@@ -4382,7 +4382,7 @@ err:
 }
 
 /* This routine returns a list of all the NIC PF_nums in the adapter */
-u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
+static u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
 {
        struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
        struct be_pcie_res_desc *pcie = NULL;
@@ -4534,7 +4534,7 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
 }
 
 /* Mark all fields invalid */
-void be_reset_nic_desc(struct be_nic_res_desc *nic)
+static void be_reset_nic_desc(struct be_nic_res_desc *nic)
 {
        memset(nic, 0, sizeof(*nic));
        nic->unicast_mac_count = 0xFFFF;
@@ -4907,8 +4907,9 @@ err:
        return status;
 }
 
-int __be_cmd_set_logical_link_config(struct be_adapter *adapter,
-                                    int link_state, int version, u8 domain)
+static int
+__be_cmd_set_logical_link_config(struct be_adapter *adapter,
+                                int link_state, int version, u8 domain)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_ll_link *req;
index 34f63ef..9a94840 100644 (file)
@@ -4365,7 +4365,7 @@ static void be_setup_init(struct be_adapter *adapter)
  * for distribution between the VFs. This self-imposed limit will determine the
  * no: of VFs for which RSS can be enabled.
  */
-void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
+static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
 {
        struct be_port_resources port_res = {0};
        u8 rss_tables_on_port;
index c80073e..c46355b 100644 (file)
@@ -906,11 +906,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                                                        length, tx_index,
                                                        &doorbell_pending))
                                        goto consumed;
-                               goto next; /* Drop on xmit failure */
+                               goto xdp_drop; /* Drop on xmit failure */
                        default:
                                bpf_warn_invalid_xdp_action(act);
                        case XDP_ABORTED:
                        case XDP_DROP:
+xdp_drop:
                                if (mlx4_en_rx_recycle(ring, frags))
                                        goto consumed;
                                goto next;
index 654b76f..4927494 100644 (file)
@@ -81,9 +81,6 @@ enum {
                            MC_ADDR_CHANGE | \
                            PROMISC_CHANGE)
 
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
-void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
-
 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
                                        u32 events_mask)
 {
index 6855783..b96e8c9 100644 (file)
@@ -209,6 +209,9 @@ struct mlx5_eswitch {
        int                     mode;
 };
 
+void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+
 /* E-Switch API */
 int mlx5_eswitch_init(struct mlx5_core_dev *dev);
 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
index 4e2354c..6460c72 100644 (file)
@@ -1392,7 +1392,7 @@ static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash)
 {
        MLXSW_REG_ZERO(slcr, payload);
        mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL);
-       mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_XOR);
+       mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_CRC);
        mlxsw_reg_slcr_lag_hash_set(payload, lag_hash);
 }
 
index 171f8dd..fa31261 100644 (file)
@@ -248,7 +248,8 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
        span_entry->used = false;
 }
 
-struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
 {
        struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
        int i;
@@ -262,7 +263,8 @@ struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
        return NULL;
 }
 
-struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
+static struct mlxsw_sp_span_entry
+*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
 {
        struct mlxsw_sp_span_entry *span_entry;
 
@@ -364,7 +366,8 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
        }
 
        /* bind the port to the SPAN entry */
-       mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, true, pa_id);
+       mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+                           (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
        if (err)
                goto err_mpar_reg_write;
@@ -405,7 +408,8 @@ mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
                return;
 
        /* remove the inspected port */
-       mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, false, pa_id);
+       mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+                           (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
 
        /* remove the SBIB buffer if it was egress SPAN */
index 953b214..bcaed8a 100644 (file)
@@ -595,9 +595,9 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
        enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
        struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
 
-       pool_info->pool_type = dir;
+       pool_info->pool_type = (enum devlink_sb_pool_type) dir;
        pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
-       pool_info->threshold_type = pr->mode;
+       pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
        return 0;
 }
 
@@ -608,9 +608,10 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        u8 pool = pool_get(pool_index);
        enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
-       enum mlxsw_reg_sbpr_mode mode = threshold_type;
        u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
+       enum mlxsw_reg_sbpr_mode mode;
 
+       mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
        return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
 }
 
@@ -696,13 +697,13 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 local_port = mlxsw_sp_port->local_port;
        u8 pg_buff = tc_index;
-       enum mlxsw_reg_sbxx_dir dir = pool_type;
+       enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
        struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
                                                       pg_buff, dir);
 
        *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
                                                 cm->max_buff);
-       *p_pool_index = pool_index_get(cm->pool, pool_type);
+       *p_pool_index = pool_index_get(cm->pool, dir);
        return 0;
 }
 
@@ -716,7 +717,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 local_port = mlxsw_sp_port->local_port;
        u8 pg_buff = tc_index;
-       enum mlxsw_reg_sbxx_dir dir = pool_type;
+       enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
        u8 pool = pool_get(pool_index);
        u32 max_buff;
        int err;
@@ -943,7 +944,7 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u8 local_port = mlxsw_sp_port->local_port;
        u8 pg_buff = tc_index;
-       enum mlxsw_reg_sbxx_dir dir = pool_type;
+       enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
        struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
                                                       pg_buff, dir);
 
index 3f5c51d..4afb498 100644 (file)
@@ -252,7 +252,9 @@ static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
 {
        char ralta_pl[MLXSW_REG_RALTA_LEN];
 
-       mlxsw_reg_ralta_pack(ralta_pl, true, lpm_tree->proto, lpm_tree->id);
+       mlxsw_reg_ralta_pack(ralta_pl, true,
+                            (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+                            lpm_tree->id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
 }
 
@@ -261,7 +263,9 @@ static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
 {
        char ralta_pl[MLXSW_REG_RALTA_LEN];
 
-       mlxsw_reg_ralta_pack(ralta_pl, false, lpm_tree->proto, lpm_tree->id);
+       mlxsw_reg_ralta_pack(ralta_pl, false,
+                            (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+                            lpm_tree->id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
 }
 
@@ -384,7 +388,9 @@ static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
 {
        char raltb_pl[MLXSW_REG_RALTB_LEN];
 
-       mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, vr->lpm_tree->id);
+       mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+                            (enum mlxsw_reg_ralxx_protocol) vr->proto,
+                            vr->lpm_tree->id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
 }
 
@@ -394,7 +400,8 @@ static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
        char raltb_pl[MLXSW_REG_RALTB_LEN];
 
        /* Bind to tree 0 which is default */
-       mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, 0);
+       mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+                            (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
 }
 
@@ -1081,9 +1088,10 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
 {
        char raleu_pl[MLXSW_REG_RALEU_LEN];
 
-       mlxsw_reg_raleu_pack(raleu_pl, vr->proto, vr->id,
-                            adj_index, ecmp_size,
-                            new_adj_index, new_ecmp_size);
+       mlxsw_reg_raleu_pack(raleu_pl,
+                            (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
+                            adj_index, ecmp_size, new_adj_index,
+                            new_ecmp_size);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
 }
 
@@ -1558,8 +1566,9 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
                trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
        }
 
-       mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
-                             fib_entry->key.prefix_len, *p_dip);
+       mlxsw_reg_ralue_pack4(ralue_pl,
+                             (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+                             vr->id, fib_entry->key.prefix_len, *p_dip);
        mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
                                        adjacency_index, ecmp_size);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1573,8 +1582,9 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
        u32 *p_dip = (u32 *) fib_entry->key.addr;
        struct mlxsw_sp_vr *vr = fib_entry->vr;
 
-       mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
-                             fib_entry->key.prefix_len, *p_dip);
+       mlxsw_reg_ralue_pack4(ralue_pl,
+                             (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+                             vr->id, fib_entry->key.prefix_len, *p_dip);
        mlxsw_reg_ralue_act_local_pack(ralue_pl,
                                       MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
                                       fib_entry->rif);
@@ -1589,8 +1599,9 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
        u32 *p_dip = (u32 *) fib_entry->key.addr;
        struct mlxsw_sp_vr *vr = fib_entry->vr;
 
-       mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
-                             fib_entry->key.prefix_len, *p_dip);
+       mlxsw_reg_ralue_pack4(ralue_pl,
+                             (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+                             vr->id, fib_entry->key.prefix_len, *p_dip);
        mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
 }
@@ -1753,8 +1764,8 @@ mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
                                         fib4->fi->fib_dev);
 }
 
-void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
-                           struct mlxsw_sp_fib_entry *fib_entry)
+static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
+                                  struct mlxsw_sp_fib_entry *fib_entry)
 {
        struct mlxsw_sp_vr *vr = fib_entry->vr;
 
index 5a1e985..470b3dc 100644 (file)
@@ -127,7 +127,7 @@ struct sixpack {
 
 #define AX25_6PACK_HEADER_LEN 0
 
-static void sixpack_decode(struct sixpack *, unsigned char[], int);
+static void sixpack_decode(struct sixpack *, const unsigned char[], int);
 static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);
 
 /*
@@ -428,7 +428,7 @@ out:
 
 /*
  * Handle the 'receiver data ready' interrupt.
- * This function is called by the 'tty_io' module in the kernel when
+ * This function is called by the tty module in the kernel when
  * a block of 6pack data has been received, which can now be decapsulated
  * and sent on to some IP layer for further processing.
  */
@@ -436,7 +436,6 @@ static void sixpack_receive_buf(struct tty_struct *tty,
        const unsigned char *cp, char *fp, int count)
 {
        struct sixpack *sp;
-       unsigned char buf[512];
        int count1;
 
        if (!count)
@@ -446,10 +445,7 @@ static void sixpack_receive_buf(struct tty_struct *tty,
        if (!sp)
                return;
 
-       memcpy(buf, cp, count < sizeof(buf) ? count : sizeof(buf));
-
        /* Read the characters out of the buffer */
-
        count1 = count;
        while (count) {
                count--;
@@ -459,7 +455,7 @@ static void sixpack_receive_buf(struct tty_struct *tty,
                        continue;
                }
        }
-       sixpack_decode(sp, buf, count1);
+       sixpack_decode(sp, cp, count1);
 
        sp_put(sp);
        tty_unthrottle(tty);
@@ -992,7 +988,7 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
 /* decode a 6pack packet */
 
 static void
-sixpack_decode(struct sixpack *sp, unsigned char *pre_rbuff, int count)
+sixpack_decode(struct sixpack *sp, const unsigned char *pre_rbuff, int count)
 {
        unsigned char inbyte;
        int count1;
index 15f8206..7c00e50 100644 (file)
@@ -55,7 +55,7 @@ static int lan88xx_phy_ack_interrupt(struct phy_device *phydev)
        return rc < 0 ? rc : 0;
 }
 
-int lan88xx_suspend(struct phy_device *phydev)
+static int lan88xx_suspend(struct phy_device *phydev)
 {
        struct lan88xx_priv *priv = phydev->priv;
 
index c09cc4a..d350deb 100644 (file)
@@ -23,6 +23,16 @@ enum rgmii_rx_clock_delay {
        RGMII_RX_CLK_DELAY_3_4_NS = 7
 };
 
+/* Microsemi VSC85xx PHY registers */
+/* IEEE 802. Std Registers */
+#define MSCC_PHY_EXT_PHY_CNTL_1           23
+#define MAC_IF_SELECTION_MASK             0x1800
+#define MAC_IF_SELECTION_GMII             0
+#define MAC_IF_SELECTION_RMII             1
+#define MAC_IF_SELECTION_RGMII            2
+#define MAC_IF_SELECTION_POS              11
+#define FAR_END_LOOPBACK_MODE_MASK        0x0008
+
 #define MII_VSC85XX_INT_MASK             25
 #define MII_VSC85XX_INT_MASK_MASK        0xa000
 #define MII_VSC85XX_INT_STATUS           26
@@ -48,6 +58,42 @@ static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page)
        return rc;
 }
 
+static int vsc85xx_mac_if_set(struct phy_device *phydev,
+                             phy_interface_t interface)
+{
+       int rc;
+       u16 reg_val;
+
+       mutex_lock(&phydev->lock);
+       reg_val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
+       reg_val &= ~(MAC_IF_SELECTION_MASK);
+       switch (interface) {
+       case PHY_INTERFACE_MODE_RGMII:
+               reg_val |= (MAC_IF_SELECTION_RGMII << MAC_IF_SELECTION_POS);
+               break;
+       case PHY_INTERFACE_MODE_RMII:
+               reg_val |= (MAC_IF_SELECTION_RMII << MAC_IF_SELECTION_POS);
+               break;
+       case PHY_INTERFACE_MODE_MII:
+       case PHY_INTERFACE_MODE_GMII:
+               reg_val |= (MAC_IF_SELECTION_GMII << MAC_IF_SELECTION_POS);
+               break;
+       default:
+               rc = -EINVAL;
+               goto out_unlock;
+       }
+       rc = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, reg_val);
+       if (rc != 0)
+               goto out_unlock;
+
+       rc = genphy_soft_reset(phydev);
+
+out_unlock:
+       mutex_unlock(&phydev->lock);
+
+       return rc;
+}
+
 static int vsc85xx_default_config(struct phy_device *phydev)
 {
        int rc;
@@ -77,6 +123,11 @@ static int vsc85xx_config_init(struct phy_device *phydev)
        rc = vsc85xx_default_config(phydev);
        if (rc)
                return rc;
+
+       rc = vsc85xx_mac_if_set(phydev, phydev->interface);
+       if (rc)
+               return rc;
+
        rc = genphy_config_init(phydev);
 
        return rc;
index 96ccd4e..e17879d 100644 (file)
@@ -565,6 +565,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct netfront_queue *queue = NULL;
        unsigned int num_queues = dev->real_num_tx_queues;
        u16 queue_index;
+       struct sk_buff *nskb;
 
        /* Drop the packet if no queues are set up */
        if (num_queues < 1)
@@ -593,6 +594,20 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        page = virt_to_page(skb->data);
        offset = offset_in_page(skb->data);
+
+       /* The first req should be at least ETH_HLEN size or the packet will be
+        * dropped by netback.
+        */
+       if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
+               nskb = skb_copy(skb, GFP_ATOMIC);
+               if (!nskb)
+                       goto drop;
+               dev_kfree_skb_any(skb);
+               skb = nskb;
+               page = virt_to_page(skb->data);
+               offset = offset_in_page(skb->data);
+       }
+
        len = skb_headlen(skb);
 
        spin_lock_irqsave(&queue->tx_lock, flags);
index fd82584..5c132d3 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Resizable, Scalable, Concurrent Hash Table
  *
- * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
  * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
  *
@@ -53,6 +53,11 @@ struct rhash_head {
        struct rhash_head __rcu         *next;
 };
 
+struct rhlist_head {
+       struct rhash_head               rhead;
+       struct rhlist_head __rcu        *next;
+};
+
 /**
  * struct bucket_table - Table of hash buckets
  * @size: Number of hash buckets
@@ -137,6 +142,7 @@ struct rhashtable_params {
  * @key_len: Key length for hashfn
  * @elasticity: Maximum chain length before rehash
  * @p: Configuration parameters
+ * @rhlist: True if this is an rhltable
  * @run_work: Deferred worker to expand/shrink asynchronously
  * @mutex: Mutex to protect current/future table swapping
  * @lock: Spin lock to protect walker list
@@ -147,11 +153,20 @@ struct rhashtable {
        unsigned int                    key_len;
        unsigned int                    elasticity;
        struct rhashtable_params        p;
+       bool                            rhlist;
        struct work_struct              run_work;
        struct mutex                    mutex;
        spinlock_t                      lock;
 };
 
+/**
+ * struct rhltable - Hash table with duplicate objects in a list
+ * @ht: Underlying rhtable
+ */
+struct rhltable {
+       struct rhashtable ht;
+};
+
 /**
  * struct rhashtable_walker - Hash table walker
  * @list: List entry on list of walkers
@@ -163,9 +178,10 @@ struct rhashtable_walker {
 };
 
 /**
- * struct rhashtable_iter - Hash table iterator, fits into netlink cb
+ * struct rhashtable_iter - Hash table iterator
  * @ht: Table to iterate through
  * @p: Current pointer
+ * @list: Current hash list pointer
  * @walker: Associated rhashtable walker
  * @slot: Current slot
  * @skip: Number of entries to skip in slot
@@ -173,6 +189,7 @@ struct rhashtable_walker {
 struct rhashtable_iter {
        struct rhashtable *ht;
        struct rhash_head *p;
+       struct rhlist_head *list;
        struct rhashtable_walker walker;
        unsigned int slot;
        unsigned int skip;
@@ -339,13 +356,11 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
 
 int rhashtable_init(struct rhashtable *ht,
                    const struct rhashtable_params *params);
+int rhltable_init(struct rhltable *hlt,
+                 const struct rhashtable_params *params);
 
-struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
-                                           const void *key,
-                                           struct rhash_head *obj,
-                                           struct bucket_table *old_tbl,
-                                           void **data);
-int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
+void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+                            struct rhash_head *obj);
 
 void rhashtable_walk_enter(struct rhashtable *ht,
                           struct rhashtable_iter *iter);
@@ -507,6 +522,31 @@ void rhashtable_destroy(struct rhashtable *ht);
        rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
                                        tbl, hash, member)
 
+/**
+ * rhl_for_each_rcu - iterate over rcu hash table list
+ * @pos:       the &struct rlist_head to use as a loop cursor.
+ * @list:      the head of the list
+ *
+ * This hash chain list-traversal primitive should be used on the
+ * list returned by rhltable_lookup.
+ */
+#define rhl_for_each_rcu(pos, list)                                    \
+       for (pos = list; pos; pos = rcu_dereference_raw(pos->next))
+
+/**
+ * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct rlist_head to use as a loop cursor.
+ * @list:      the head of the list
+ * @member:    name of the &struct rlist_head within the hashable struct.
+ *
+ * This hash chain list-traversal primitive should be used on the
+ * list returned by rhltable_lookup.
+ */
+#define rhl_for_each_entry_rcu(tpos, pos, list, member)                        \
+       for (pos = list; pos && rht_entry(tpos, pos, member);           \
+            pos = rcu_dereference_raw(pos->next))
+
 static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
                                     const void *obj)
 {
@@ -516,18 +556,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
        return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
 }
 
-/**
- * rhashtable_lookup_fast - search hash table, inlined version
- * @ht:                hash table
- * @key:       the pointer to the key
- * @params:    hash table parameters
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
- *
- * Returns the first entry on which the compare function returned true.
- */
-static inline void *rhashtable_lookup_fast(
+/* Internal function, do not use. */
+static inline struct rhash_head *__rhashtable_lookup(
        struct rhashtable *ht, const void *key,
        const struct rhashtable_params params)
 {
@@ -539,8 +569,6 @@ static inline void *rhashtable_lookup_fast(
        struct rhash_head *he;
        unsigned int hash;
 
-       rcu_read_lock();
-
        tbl = rht_dereference_rcu(ht->tbl, ht);
 restart:
        hash = rht_key_hashfn(ht, tbl, key, params);
@@ -549,8 +577,7 @@ restart:
                    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
                    rhashtable_compare(&arg, rht_obj(ht, he)))
                        continue;
-               rcu_read_unlock();
-               return rht_obj(ht, he);
+               return he;
        }
 
        /* Ensure we see any new tables. */
@@ -559,96 +586,165 @@ restart:
        tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (unlikely(tbl))
                goto restart;
-       rcu_read_unlock();
 
        return NULL;
 }
 
+/**
+ * rhashtable_lookup - search hash table
+ * @ht:                hash table
+ * @key:       the pointer to the key
+ * @params:    hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for a entry with an identical key. The first matching entry is returned.
+ *
+ * This must only be called under the RCU read lock.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup(
+       struct rhashtable *ht, const void *key,
+       const struct rhashtable_params params)
+{
+       struct rhash_head *he = __rhashtable_lookup(ht, key, params);
+
+       return he ? rht_obj(ht, he) : NULL;
+}
+
+/**
+ * rhashtable_lookup_fast - search hash table, without RCU read lock
+ * @ht:                hash table
+ * @key:       the pointer to the key
+ * @params:    hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for a entry with an identical key. The first matching entry is returned.
+ *
+ * Only use this function when you have other mechanisms guaranteeing
+ * that the object won't go away after the RCU read lock is released.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup_fast(
+       struct rhashtable *ht, const void *key,
+       const struct rhashtable_params params)
+{
+       void *obj;
+
+       rcu_read_lock();
+       obj = rhashtable_lookup(ht, key, params);
+       rcu_read_unlock();
+
+       return obj;
+}
+
+/**
+ * rhltable_lookup - search hash list table
+ * @hlt:       hash table
+ * @key:       the pointer to the key
+ * @params:    hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for a entry with an identical key.  All matching entries are returned
+ * in a list.
+ *
+ * This must only be called under the RCU read lock.
+ *
+ * Returns the list of entries that match the given key.
+ */
+static inline struct rhlist_head *rhltable_lookup(
+       struct rhltable *hlt, const void *key,
+       const struct rhashtable_params params)
+{
+       struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
+
+       return he ? container_of(he, struct rhlist_head, rhead) : NULL;
+}
+
 /* Internal function, please use rhashtable_insert_fast() instead. This
  * function returns the existing element already in hashes in there is a clash,
  * otherwise it returns an error via ERR_PTR().
  */
 static inline void *__rhashtable_insert_fast(
        struct rhashtable *ht, const void *key, struct rhash_head *obj,
-       const struct rhashtable_params params)
+       const struct rhashtable_params params, bool rhlist)
 {
        struct rhashtable_compare_arg arg = {
                .ht = ht,
                .key = key,
        };
-       struct bucket_table *tbl, *new_tbl;
+       struct rhash_head __rcu **pprev;
+       struct bucket_table *tbl;
        struct rhash_head *head;
        spinlock_t *lock;
-       unsigned int elasticity;
        unsigned int hash;
-       void *data = NULL;
-       int err;
+       int elasticity;
+       void *data;
 
-restart:
        rcu_read_lock();
 
        tbl = rht_dereference_rcu(ht->tbl, ht);
+       hash = rht_head_hashfn(ht, tbl, obj, params);
+       lock = rht_bucket_lock(tbl, hash);
+       spin_lock_bh(lock);
 
-       /* All insertions must grab the oldest table containing
-        * the hashed bucket that is yet to be rehashed.
-        */
-       for (;;) {
-               hash = rht_head_hashfn(ht, tbl, obj, params);
-               lock = rht_bucket_lock(tbl, hash);
-               spin_lock_bh(lock);
-
-               if (tbl->rehash <= hash)
-                       break;
-
+       if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
+slow_path:
                spin_unlock_bh(lock);
-               tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+               rcu_read_unlock();
+               return rhashtable_insert_slow(ht, key, obj);
        }
 
-       new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
-       if (unlikely(new_tbl)) {
-               tbl = rhashtable_insert_slow(ht, key, obj, new_tbl, &data);
-               if (!IS_ERR_OR_NULL(tbl))
-                       goto slow_path;
+       elasticity = ht->elasticity;
+       pprev = &tbl->buckets[hash];
+       rht_for_each(head, tbl, hash) {
+               struct rhlist_head *plist;
+               struct rhlist_head *list;
+
+               elasticity--;
+               if (!key ||
+                   (params.obj_cmpfn ?
+                    params.obj_cmpfn(&arg, rht_obj(ht, head)) :
+                    rhashtable_compare(&arg, rht_obj(ht, head))))
+                       continue;
+
+               data = rht_obj(ht, head);
 
-               err = PTR_ERR(tbl);
-               if (err == -EEXIST)
-                       err = 0;
+               if (!rhlist)
+                       goto out;
 
-               goto out;
-       }
 
-       err = -E2BIG;
-       if (unlikely(rht_grow_above_max(ht, tbl)))
-               goto out;
+               list = container_of(obj, struct rhlist_head, rhead);
+               plist = container_of(head, struct rhlist_head, rhead);
 
-       if (unlikely(rht_grow_above_100(ht, tbl))) {
-slow_path:
-               spin_unlock_bh(lock);
-               err = rhashtable_insert_rehash(ht, tbl);
-               rcu_read_unlock();
-               if (err)
-                       return ERR_PTR(err);
+               RCU_INIT_POINTER(list->next, plist);
+               head = rht_dereference_bucket(head->next, tbl, hash);
+               RCU_INIT_POINTER(list->rhead.next, head);
+               rcu_assign_pointer(*pprev, obj);
 
-               goto restart;
+               goto good;
        }
 
-       err = 0;
-       elasticity = ht->elasticity;
-       rht_for_each(head, tbl, hash) {
-               if (key &&
-                   unlikely(!(params.obj_cmpfn ?
-                              params.obj_cmpfn(&arg, rht_obj(ht, head)) :
-                              rhashtable_compare(&arg, rht_obj(ht, head))))) {
-                       data = rht_obj(ht, head);
-                       goto out;
-               }
-               if (!--elasticity)
-                       goto slow_path;
-       }
+       if (elasticity <= 0)
+               goto slow_path;
+
+       data = ERR_PTR(-E2BIG);
+       if (unlikely(rht_grow_above_max(ht, tbl)))
+               goto out;
+
+       if (unlikely(rht_grow_above_100(ht, tbl)))
+               goto slow_path;
 
        head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
 
        RCU_INIT_POINTER(obj->next, head);
+       if (rhlist) {
+               struct rhlist_head *list;
+
+               list = container_of(obj, struct rhlist_head, rhead);
+               RCU_INIT_POINTER(list->next, NULL);
+       }
 
        rcu_assign_pointer(tbl->buckets[hash], obj);
 
@@ -656,11 +752,14 @@ slow_path:
        if (rht_grow_above_75(ht, tbl))
                schedule_work(&ht->run_work);
 
+good:
+       data = NULL;
+
 out:
        spin_unlock_bh(lock);
        rcu_read_unlock();
 
-       return err ? ERR_PTR(err) : data;
+       return data;
 }
 
 /**
@@ -685,13 +784,65 @@ static inline int rhashtable_insert_fast(
 {
        void *ret;
 
-       ret = __rhashtable_insert_fast(ht, NULL, obj, params);
+       ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
        if (IS_ERR(ret))
                return PTR_ERR(ret);
 
        return ret == NULL ? 0 : -EEXIST;
 }
 
+/**
+ * rhltable_insert_key - insert object into hash list table
+ * @hlt:       hash list table
+ * @key:       the pointer to the key
+ * @list:      pointer to hash list head inside object
+ * @params:    hash table parameters
+ *
+ * Will take a per bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+static inline int rhltable_insert_key(
+       struct rhltable *hlt, const void *key, struct rhlist_head *list,
+       const struct rhashtable_params params)
+{
+       return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
+                                               params, true));
+}
+
+/**
+ * rhltable_insert - insert object into hash list table
+ * @hlt:       hash list table
+ * @list:      pointer to hash list head inside object
+ * @params:    hash table parameters
+ *
+ * Will take a per bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+static inline int rhltable_insert(
+       struct rhltable *hlt, struct rhlist_head *list,
+       const struct rhashtable_params params)
+{
+       const char *key = rht_obj(&hlt->ht, &list->rhead);
+
+       key += params.key_offset;
+
+       return rhltable_insert_key(hlt, key, list, params);
+}
+
 /**
  * rhashtable_lookup_insert_fast - lookup and insert object into hash table
  * @ht:                hash table
@@ -722,7 +873,8 @@ static inline int rhashtable_lookup_insert_fast(
 
        BUG_ON(ht->p.obj_hashfn);
 
-       ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params);
+       ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
+                                      false);
        if (IS_ERR(ret))
                return PTR_ERR(ret);
 
@@ -759,7 +911,7 @@ static inline int rhashtable_lookup_insert_key(
 
        BUG_ON(!ht->p.obj_hashfn || !key);
 
-       ret = __rhashtable_insert_fast(ht, key, obj, params);
+       ret = __rhashtable_insert_fast(ht, key, obj, params, false);
        if (IS_ERR(ret))
                return PTR_ERR(ret);
 
@@ -783,13 +935,14 @@ static inline void *rhashtable_lookup_get_insert_key(
 {
        BUG_ON(!ht->p.obj_hashfn || !key);
 
-       return __rhashtable_insert_fast(ht, key, obj, params);
+       return __rhashtable_insert_fast(ht, key, obj, params, false);
 }
 
 /* Internal function, please use rhashtable_remove_fast() instead */
-static inline int __rhashtable_remove_fast(
+static inline int __rhashtable_remove_fast_one(
        struct rhashtable *ht, struct bucket_table *tbl,
-       struct rhash_head *obj, const struct rhashtable_params params)
+       struct rhash_head *obj, const struct rhashtable_params params,
+       bool rhlist)
 {
        struct rhash_head __rcu **pprev;
        struct rhash_head *he;
@@ -804,39 +957,66 @@ static inline int __rhashtable_remove_fast(
 
        pprev = &tbl->buckets[hash];
        rht_for_each(he, tbl, hash) {
+               struct rhlist_head *list;
+
+               list = container_of(he, struct rhlist_head, rhead);
+
                if (he != obj) {
+                       struct rhlist_head __rcu **lpprev;
+
                        pprev = &he->next;
-                       continue;
+
+                       if (!rhlist)
+                               continue;
+
+                       do {
+                               lpprev = &list->next;
+                               list = rht_dereference_bucket(list->next,
+                                                             tbl, hash);
+                       } while (list && obj != &list->rhead);
+
+                       if (!list)
+                               continue;
+
+                       list = rht_dereference_bucket(list->next, tbl, hash);
+                       RCU_INIT_POINTER(*lpprev, list);
+                       err = 0;
+                       break;
                }
 
-               rcu_assign_pointer(*pprev, obj->next);
-               err = 0;
+               obj = rht_dereference_bucket(obj->next, tbl, hash);
+               err = 1;
+
+               if (rhlist) {
+                       list = rht_dereference_bucket(list->next, tbl, hash);
+                       if (list) {
+                               RCU_INIT_POINTER(list->rhead.next, obj);
+                               obj = &list->rhead;
+                               err = 0;
+                       }
+               }
+
+               rcu_assign_pointer(*pprev, obj);
                break;
        }
 
        spin_unlock_bh(lock);
 
+       if (err > 0) {
+               atomic_dec(&ht->nelems);
+               if (unlikely(ht->p.automatic_shrinking &&
+                            rht_shrink_below_30(ht, tbl)))
+                       schedule_work(&ht->run_work);
+               err = 0;
+       }
+
        return err;
 }
 
-/**
- * rhashtable_remove_fast - remove object from hash table
- * @ht:                hash table
- * @obj:       pointer to hash head inside object
- * @params:    hash table parameters
- *
- * Since the hash chain is single linked, the removal operation needs to
- * walk the bucket chain upon removal. The removal operation is thus
- * considerable slow if the hash table is not correctly sized.
- *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
- *
- * Returns zero on success, -ENOENT if the entry could not be found.
- */
-static inline int rhashtable_remove_fast(
+/* Internal function, please use rhashtable_remove_fast() instead */
+static inline int __rhashtable_remove_fast(
        struct rhashtable *ht, struct rhash_head *obj,
-       const struct rhashtable_params params)
+       const struct rhashtable_params params, bool rhlist)
 {
        struct bucket_table *tbl;
        int err;
@@ -850,24 +1030,60 @@ static inline int rhashtable_remove_fast(
         * visible then that guarantees the entry to still be in
         * the old tbl if it exists.
         */
-       while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
+       while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
+                                                  rhlist)) &&
               (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
                ;
 
-       if (err)
-               goto out;
-
-       atomic_dec(&ht->nelems);
-       if (unlikely(ht->p.automatic_shrinking &&
-                    rht_shrink_below_30(ht, tbl)))
-               schedule_work(&ht->run_work);
-
-out:
        rcu_read_unlock();
 
        return err;
 }
 
+/**
+ * rhashtable_remove_fast - remove object from hash table
+ * @ht:                hash table
+ * @obj:       pointer to hash head inside object
+ * @params:    hash table parameters
+ *
+ * Since the hash chain is single linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerable slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_expand() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhashtable_remove_fast(
+       struct rhashtable *ht, struct rhash_head *obj,
+       const struct rhashtable_params params)
+{
+       return __rhashtable_remove_fast(ht, obj, params, false);
+}
+
+/**
+ * rhltable_remove - remove object from hash list table
+ * @hlt:       hash list table
+ * @list:      pointer to hash list head inside object
+ * @params:    hash table parameters
+ *
+ * Since the hash chain is single linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerable slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_expand() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhltable_remove(
+       struct rhltable *hlt, struct rhlist_head *list,
+       const struct rhashtable_params params)
+{
+       return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
+}
+
 /* Internal function, please use rhashtable_replace_fast() instead */
 static inline int __rhashtable_replace_fast(
        struct rhashtable *ht, struct bucket_table *tbl,
@@ -958,4 +1174,51 @@ static inline int rhashtable_walk_init(struct rhashtable *ht,
        return 0;
 }
 
+/**
+ * rhltable_walk_enter - Initialise an iterator
+ * @hlt:       Table to walk over
+ * @iter:      Hash table Iterator
+ *
+ * This function prepares a hash table walk.
+ *
+ * Note that if you restart a walk after rhashtable_walk_stop you
+ * may see the same object twice.  Also, you may miss objects if
+ * there are removals in between rhashtable_walk_stop and the next
+ * call to rhashtable_walk_start.
+ *
+ * For a completely stable walk you should construct your own data
+ * structure outside the hash table.
+ *
+ * This function may sleep so you must not call it from interrupt
+ * context or with spin locks held.
+ *
+ * You must call rhashtable_walk_exit after this function returns.
+ */
+static inline void rhltable_walk_enter(struct rhltable *hlt,
+                                      struct rhashtable_iter *iter)
+{
+       return rhashtable_walk_enter(&hlt->ht, iter);
+}
+
+/**
+ * rhltable_free_and_destroy - free elements and destroy hash list table
+ * @hlt:       the hash list table to destroy
+ * @free_fn:   callback to release resources of element
+ * @arg:       pointer passed to free_fn
+ *
+ * See documentation for rhashtable_free_and_destroy.
+ */
+static inline void rhltable_free_and_destroy(struct rhltable *hlt,
+                                            void (*free_fn)(void *ptr,
+                                                            void *arg),
+                                            void *arg)
+{
+       return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
+}
+
+static inline void rhltable_destroy(struct rhltable *hlt)
+{
+       return rhltable_free_and_destroy(hlt, NULL, NULL);
+}
+
 #endif /* _LINUX_RHASHTABLE_H */
index 5164bd7..9fd2bea 100644 (file)
@@ -50,9 +50,11 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
 int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
 int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
 int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
+int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi);
 int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
 int ife_validate_meta_u32(void *val, int len);
 int ife_validate_meta_u16(void *val, int len);
+int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi);
 void ife_release_meta_gen(struct tcf_meta_info *mi);
 int register_ife_op(struct tcf_meta_ops *mops);
 int unregister_ife_op(struct tcf_meta_ops *mops);
index 4ece02a..cd18360 100644 (file)
@@ -32,8 +32,9 @@ enum {
 #define IFE_META_HASHID 2
 #define        IFE_META_PRIO 3
 #define        IFE_META_QMAP 4
+#define        IFE_META_TCINDEX 5
 /*Can be overridden at runtime by module option*/
-#define        __IFE_META_MAX 5
+#define        __IFE_META_MAX 6
 #define IFE_META_MAX (__IFE_META_MAX - 1)
 
 #endif
index 06c2872..32d0ad0 100644 (file)
@@ -378,22 +378,8 @@ static void rht_deferred_worker(struct work_struct *work)
                schedule_work(&ht->run_work);
 }
 
-static bool rhashtable_check_elasticity(struct rhashtable *ht,
-                                       struct bucket_table *tbl,
-                                       unsigned int hash)
-{
-       unsigned int elasticity = ht->elasticity;
-       struct rhash_head *head;
-
-       rht_for_each(head, tbl, hash)
-               if (!--elasticity)
-                       return true;
-
-       return false;
-}
-
-int rhashtable_insert_rehash(struct rhashtable *ht,
-                            struct bucket_table *tbl)
+static int rhashtable_insert_rehash(struct rhashtable *ht,
+                                   struct bucket_table *tbl)
 {
        struct bucket_table *old_tbl;
        struct bucket_table *new_tbl;
@@ -439,57 +425,165 @@ fail:
 
        return err;
 }
-EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
 
-struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
-                                           const void *key,
-                                           struct rhash_head *obj,
-                                           struct bucket_table *tbl,
-                                           void **data)
+static void *rhashtable_lookup_one(struct rhashtable *ht,
+                                  struct bucket_table *tbl, unsigned int hash,
+                                  const void *key, struct rhash_head *obj)
 {
+       struct rhashtable_compare_arg arg = {
+               .ht = ht,
+               .key = key,
+       };
+       struct rhash_head __rcu **pprev;
        struct rhash_head *head;
-       unsigned int hash;
-       int err;
+       int elasticity;
 
-       tbl = rhashtable_last_table(ht, tbl);
-       hash = head_hashfn(ht, tbl, obj);
-       spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
-
-       err = -EEXIST;
-       if (key) {
-               *data = rhashtable_lookup_fast(ht, key, ht->p);
-               if (*data)
-                       goto exit;
+       elasticity = ht->elasticity;
+       pprev = &tbl->buckets[hash];
+       rht_for_each(head, tbl, hash) {
+               struct rhlist_head *list;
+               struct rhlist_head *plist;
+
+               elasticity--;
+               if (!key ||
+                   (ht->p.obj_cmpfn ?
+                    ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
+                    rhashtable_compare(&arg, rht_obj(ht, head))))
+                       continue;
+
+               if (!ht->rhlist)
+                       return rht_obj(ht, head);
+
+               list = container_of(obj, struct rhlist_head, rhead);
+               plist = container_of(head, struct rhlist_head, rhead);
+
+               RCU_INIT_POINTER(list->next, plist);
+               head = rht_dereference_bucket(head->next, tbl, hash);
+               RCU_INIT_POINTER(list->rhead.next, head);
+               rcu_assign_pointer(*pprev, obj);
+
+               return NULL;
        }
 
-       err = -E2BIG;
-       if (unlikely(rht_grow_above_max(ht, tbl)))
-               goto exit;
+       if (elasticity <= 0)
+               return ERR_PTR(-EAGAIN);
+
+       return ERR_PTR(-ENOENT);
+}
+
+static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
+                                                 struct bucket_table *tbl,
+                                                 unsigned int hash,
+                                                 struct rhash_head *obj,
+                                                 void *data)
+{
+       struct bucket_table *new_tbl;
+       struct rhash_head *head;
+
+       if (!IS_ERR_OR_NULL(data))
+               return ERR_PTR(-EEXIST);
 
-       err = -EAGAIN;
-       if (rhashtable_check_elasticity(ht, tbl, hash) ||
-           rht_grow_above_100(ht, tbl))
-               goto exit;
+       if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
+               return ERR_CAST(data);
 
-       err = 0;
+       new_tbl = rcu_dereference(tbl->future_tbl);
+       if (new_tbl)
+               return new_tbl;
+
+       if (PTR_ERR(data) != -ENOENT)
+               return ERR_CAST(data);
+
+       if (unlikely(rht_grow_above_max(ht, tbl)))
+               return ERR_PTR(-E2BIG);
+
+       if (unlikely(rht_grow_above_100(ht, tbl)))
+               return ERR_PTR(-EAGAIN);
 
        head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
 
        RCU_INIT_POINTER(obj->next, head);
+       if (ht->rhlist) {
+               struct rhlist_head *list;
+
+               list = container_of(obj, struct rhlist_head, rhead);
+               RCU_INIT_POINTER(list->next, NULL);
+       }
 
        rcu_assign_pointer(tbl->buckets[hash], obj);
 
        atomic_inc(&ht->nelems);
+       if (rht_grow_above_75(ht, tbl))
+               schedule_work(&ht->run_work);
 
-exit:
-       spin_unlock(rht_bucket_lock(tbl, hash));
+       return NULL;
+}
 
-       if (err == 0)
-               return NULL;
-       else if (err == -EAGAIN)
-               return tbl;
-       else
-               return ERR_PTR(err);
+static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
+                                  struct rhash_head *obj)
+{
+       struct bucket_table *new_tbl;
+       struct bucket_table *tbl;
+       unsigned int hash;
+       spinlock_t *lock;
+       void *data;
+
+       tbl = rcu_dereference(ht->tbl);
+
+       /* All insertions must grab the oldest table containing
+        * the hashed bucket that is yet to be rehashed.
+        */
+       for (;;) {
+               hash = rht_head_hashfn(ht, tbl, obj, ht->p);
+               lock = rht_bucket_lock(tbl, hash);
+               spin_lock_bh(lock);
+
+               if (tbl->rehash <= hash)
+                       break;
+
+               spin_unlock_bh(lock);
+               tbl = rcu_dereference(tbl->future_tbl);
+       }
+
+       data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
+       new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
+       if (PTR_ERR(new_tbl) != -EEXIST)
+               data = ERR_CAST(new_tbl);
+
+       while (!IS_ERR_OR_NULL(new_tbl)) {
+               tbl = new_tbl;
+               hash = rht_head_hashfn(ht, tbl, obj, ht->p);
+               spin_lock_nested(rht_bucket_lock(tbl, hash),
+                                SINGLE_DEPTH_NESTING);
+
+               data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
+               new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
+               if (PTR_ERR(new_tbl) != -EEXIST)
+                       data = ERR_CAST(new_tbl);
+
+               spin_unlock(rht_bucket_lock(tbl, hash));
+       }
+
+       spin_unlock_bh(lock);
+
+       if (PTR_ERR(data) == -EAGAIN)
+               data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
+                              -EAGAIN);
+
+       return data;
+}
+
+void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+                            struct rhash_head *obj)
+{
+       void *data;
+
+       do {
+               rcu_read_lock();
+               data = rhashtable_try_insert(ht, key, obj);
+               rcu_read_unlock();
+       } while (PTR_ERR(data) == -EAGAIN);
+
+       return data;
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
 
@@ -593,11 +687,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_start);
 void *rhashtable_walk_next(struct rhashtable_iter *iter)
 {
        struct bucket_table *tbl = iter->walker.tbl;
+       struct rhlist_head *list = iter->list;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;
+       bool rhlist = ht->rhlist;
 
        if (p) {
-               p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
+               if (!rhlist || !(list = rcu_dereference(list->next))) {
+                       p = rcu_dereference(p->next);
+                       list = container_of(p, struct rhlist_head, rhead);
+               }
                goto next;
        }
 
@@ -605,6 +704,18 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
                int skip = iter->skip;
 
                rht_for_each_rcu(p, tbl, iter->slot) {
+                       if (rhlist) {
+                               list = container_of(p, struct rhlist_head,
+                                                   rhead);
+                               do {
+                                       if (!skip)
+                                               goto next;
+                                       skip--;
+                                       list = rcu_dereference(list->next);
+                               } while (list);
+
+                               continue;
+                       }
                        if (!skip)
                                break;
                        skip--;
@@ -614,7 +725,8 @@ next:
                if (!rht_is_a_nulls(p)) {
                        iter->skip++;
                        iter->p = p;
-                       return rht_obj(ht, p);
+                       iter->list = list;
+                       return rht_obj(ht, rhlist ? &list->rhead : p);
                }
 
                iter->skip = 0;
@@ -802,6 +914,48 @@ int rhashtable_init(struct rhashtable *ht,
 }
 EXPORT_SYMBOL_GPL(rhashtable_init);
 
+/**
+ * rhltable_init - initialize a new hash list table
+ * @hlt:       hash list table to be initialized
+ * @params:    configuration parameters
+ *
+ * Initializes a new hash list table.
+ *
+ * See documentation for rhashtable_init.
+ */
+int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
+{
+       int err;
+
+       /* No rhlist NULLs marking for now. */
+       if (params->nulls_base)
+               return -EINVAL;
+
+       err = rhashtable_init(&hlt->ht, params);
+       hlt->ht.rhlist = true;
+       return err;
+}
+EXPORT_SYMBOL_GPL(rhltable_init);
+
+static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
+                               void (*free_fn)(void *ptr, void *arg),
+                               void *arg)
+{
+       struct rhlist_head *list;
+
+       if (!ht->rhlist) {
+               free_fn(rht_obj(ht, obj), arg);
+               return;
+       }
+
+       list = container_of(obj, struct rhlist_head, rhead);
+       do {
+               obj = &list->rhead;
+               list = rht_dereference(list->next, ht);
+               free_fn(rht_obj(ht, obj), arg);
+       } while (list);
+}
+
 /**
  * rhashtable_free_and_destroy - free elements and destroy hash table
  * @ht:                the hash table to destroy
@@ -839,7 +993,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                             pos = next,
                             next = !rht_is_a_nulls(pos) ?
                                        rht_dereference(pos->next, ht) : NULL)
-                               free_fn(rht_obj(ht, pos), arg);
+                               rhashtable_free_one(ht, pos, free_fn, arg);
                }
        }
 
index 1e329d4..7bf82a2 100644 (file)
@@ -3097,11 +3097,31 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
        sg = !!(features & NETIF_F_SG);
        csum = !!can_checksum_protocol(features, proto);
 
-       /* GSO partial only requires that we trim off any excess that
-        * doesn't fit into an MSS sized block, so take care of that
-        * now.
-        */
-       if (sg && csum && (features & NETIF_F_GSO_PARTIAL)) {
+       if (sg && csum && (mss != GSO_BY_FRAGS))  {
+               if (!(features & NETIF_F_GSO_PARTIAL)) {
+                       struct sk_buff *iter;
+
+                       if (!list_skb ||
+                           !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
+                               goto normal;
+
+                       /* Split the buffer at the frag_list pointer.
+                        * This is based on the assumption that all
+                        * buffers in the chain, excluding the last one,
+                        * contain the same amount of data.
+                        */
+                       skb_walk_frags(head_skb, iter) {
+                               if (skb_headlen(iter))
+                                       goto normal;
+
+                               len -= iter->len;
+                       }
+               }
+
+               /* GSO partial only requires that we trim off any excess that
+                * doesn't fit into an MSS sized block, so take care of that
+                * now.
+                */
                partial_segs = len / mss;
                if (partial_segs > 1)
                        mss *= partial_segs;
@@ -3109,6 +3129,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
                        partial_segs = 0;
        }
 
+normal:
        headroom = skb_headroom(head_skb);
        pos = skb_headlen(head_skb);
 
@@ -3300,21 +3321,29 @@ perform_csum_check:
         */
        segs->prev = tail;
 
-       /* Update GSO info on first skb in partial sequence. */
        if (partial_segs) {
+               struct sk_buff *iter;
                int type = skb_shinfo(head_skb)->gso_type;
+               unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
 
                /* Update type to add partial and then remove dodgy if set */
-               type |= SKB_GSO_PARTIAL;
+               type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
                type &= ~SKB_GSO_DODGY;
 
                /* Update GSO info and prepare to start updating headers on
                 * our way back down the stack of protocols.
                 */
-               skb_shinfo(segs)->gso_size = skb_shinfo(head_skb)->gso_size;
-               skb_shinfo(segs)->gso_segs = partial_segs;
-               skb_shinfo(segs)->gso_type = type;
-               SKB_GSO_CB(segs)->data_offset = skb_headroom(segs) + doffset;
+               for (iter = segs; iter; iter = iter->next) {
+                       skb_shinfo(iter)->gso_size = gso_size;
+                       skb_shinfo(iter)->gso_segs = partial_segs;
+                       skb_shinfo(iter)->gso_type = type;
+                       SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
+               }
+
+               if (tail->len - doffset <= gso_size)
+                       skb_shinfo(tail)->gso_size = 0;
+               else if (tail != segs)
+                       skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
        }
 
        /* Following permits correct backpressure, for protocols
index 66e31ac..a6902c1 100644 (file)
@@ -378,9 +378,11 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
        if (ret < 0)
                goto out;
 
-       ret = ops->set_addr(ds, dst->master_netdev->dev_addr);
-       if (ret < 0)
-               goto out;
+       if (ops->set_addr) {
+               ret = ops->set_addr(ds, dst->master_netdev->dev_addr);
+               if (ret < 0)
+                       goto out;
+       }
 
        if (!ds->slave_mii_bus && ops->phy_read) {
                ds->slave_mii_bus = devm_mdiobus_alloc(parent);
index 8278385..f8a7d9a 100644 (file)
@@ -304,13 +304,11 @@ static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
        if (err < 0)
                return err;
 
-       err = ds->ops->set_addr(ds, dst->master_netdev->dev_addr);
-       if (err < 0)
-               return err;
-
-       err = ds->ops->set_addr(ds, dst->master_netdev->dev_addr);
-       if (err < 0)
-               return err;
+       if (ds->ops->set_addr) {
+               err = ds->ops->set_addr(ds, dst->master_netdev->dev_addr);
+               if (err < 0)
+                       return err;
+       }
 
        if (!ds->slave_mii_bus && ds->ops->phy_read) {
                ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
index e94b47b..1effc98 100644 (file)
@@ -1192,7 +1192,7 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
 struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                                 netdev_features_t features)
 {
-       bool udpfrag = false, fixedid = false, encap;
+       bool udpfrag = false, fixedid = false, gso_partial, encap;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
        unsigned int offset = 0;
@@ -1245,6 +1245,8 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
        if (IS_ERR_OR_NULL(segs))
                goto out;
 
+       gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
+
        skb = segs;
        do {
                iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
@@ -1259,9 +1261,13 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                                iph->id = htons(id);
                                id += skb_shinfo(skb)->gso_segs;
                        }
-                       tot_len = skb_shinfo(skb)->gso_size +
-                                 SKB_GSO_CB(skb)->data_offset +
-                                 skb->head - (unsigned char *)iph;
+
+                       if (gso_partial)
+                               tot_len = skb_shinfo(skb)->gso_size +
+                                         SKB_GSO_CB(skb)->data_offset +
+                                         skb->head - (unsigned char *)iph;
+                       else
+                               tot_len = skb->len - nhoff;
                } else {
                        if (!fixedid)
                                iph->id = htons(id++);
index ecd1e09..96e0efe 100644 (file)
@@ -24,7 +24,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
        __be16 protocol = skb->protocol;
        u16 mac_len = skb->mac_len;
        int gre_offset, outer_hlen;
-       bool need_csum, ufo;
+       bool need_csum, ufo, gso_partial;
 
        if (!skb->encapsulation)
                goto out;
@@ -69,6 +69,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
                goto out;
        }
 
+       gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
+
        outer_hlen = skb_tnl_header_len(skb);
        gre_offset = outer_hlen - tnl_hlen;
        skb = segs;
@@ -96,7 +98,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
                greh = (struct gre_base_hdr *)skb_transport_header(skb);
                pcsum = (__sum16 *)(greh + 1);
 
-               if (skb_is_gso(skb)) {
+               if (gso_partial) {
                        unsigned int partial_adj;
 
                        /* Adjust checksum to account for the fact that
index 5c59649..bc68da3 100644 (file)
@@ -90,12 +90,6 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                goto out;
        }
 
-       /* GSO partial only requires splitting the frame into an MSS
-        * multiple and possibly a remainder.  So update the mss now.
-        */
-       if (features & NETIF_F_GSO_PARTIAL)
-               mss = skb->len - (skb->len % mss);
-
        copy_destructor = gso_skb->destructor == tcp_wfree;
        ooo_okay = gso_skb->ooo_okay;
        /* All segments but the first should have ooo_okay cleared */
@@ -108,6 +102,13 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
        /* Only first segment might have ooo_okay set */
        segs->ooo_okay = ooo_okay;
 
+       /* GSO partial and frag_list segmentation only requires splitting
+        * the frame into an MSS multiple and possibly a remainder, both
+        * cases return a GSO skb. So update the mss now.
+        */
+       if (skb_is_gso(segs))
+               mss *= skb_shinfo(segs)->gso_segs;
+
        delta = htonl(oldlen + (thlen + mss));
 
        skb = segs;
index 81f253b..f9333c9 100644 (file)
@@ -21,7 +21,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
        __be16 new_protocol, bool is_ipv6)
 {
        int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
-       bool remcsum, need_csum, offload_csum, ufo;
+       bool remcsum, need_csum, offload_csum, ufo, gso_partial;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct udphdr *uh = udp_hdr(skb);
        u16 mac_offset = skb->mac_header;
@@ -88,6 +88,8 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
                goto out;
        }
 
+       gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
+
        outer_hlen = skb_tnl_header_len(skb);
        udp_offset = outer_hlen - tnl_hlen;
        skb = segs;
@@ -117,7 +119,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
                 * will be using a length value equal to only one MSS sized
                 * segment instead of the entire frame.
                 */
-               if (skb_is_gso(skb)) {
+               if (gso_partial) {
                        uh->len = htons(skb_shinfo(skb)->gso_size +
                                        SKB_GSO_CB(skb)->data_offset +
                                        skb->head - (unsigned char *)uh);
index 22e90e5..e7bfd55 100644 (file)
@@ -69,6 +69,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
        int offset = 0;
        bool encap, udpfrag;
        int nhoff;
+       bool gso_partial;
 
        skb_reset_network_header(skb);
        nhoff = skb_network_header(skb) - skb_mac_header(skb);
@@ -101,9 +102,11 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
        if (IS_ERR(segs))
                goto out;
 
+       gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
+
        for (skb = segs; skb; skb = skb->next) {
                ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
-               if (skb_is_gso(skb))
+               if (gso_partial)
                        payload_len = skb_shinfo(skb)->gso_size +
                                      SKB_GSO_CB(skb)->data_offset +
                                      skb->head - (unsigned char *)(ipv6h + 1);
index c71c735..e496dee 100644 (file)
@@ -1213,7 +1213,7 @@ struct ieee80211_local {
        spinlock_t tim_lock;
        unsigned long num_sta;
        struct list_head sta_list;
-       struct rhashtable sta_hash;
+       struct rhltable sta_hash;
        struct timer_list sta_cleanup;
        int sta_generation;
 
index e796060..f7cf342 100644 (file)
@@ -4003,7 +4003,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
        __le16 fc;
        struct ieee80211_rx_data rx;
        struct ieee80211_sub_if_data *prev;
-       struct rhash_head *tmp;
+       struct rhlist_head *tmp;
        int err = 0;
 
        fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
@@ -4046,13 +4046,10 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
                goto out;
        } else if (ieee80211_is_data(fc)) {
                struct sta_info *sta, *prev_sta;
-               const struct bucket_table *tbl;
 
                prev_sta = NULL;
 
-               tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
-
-               for_each_sta_info(local, tbl, hdr->addr2, sta, tmp) {
+               for_each_sta_info(local, hdr->addr2, sta, tmp) {
                        if (!prev_sta) {
                                prev_sta = sta;
                                continue;
index 1b1b28f..c803e2c 100644 (file)
 
 static const struct rhashtable_params sta_rht_params = {
        .nelem_hint = 3, /* start small */
-       .insecure_elasticity = true, /* Disable chain-length checks. */
        .automatic_shrinking = true,
        .head_offset = offsetof(struct sta_info, hash_node),
        .key_offset = offsetof(struct sta_info, addr),
        .key_len = ETH_ALEN,
-       .hashfn = sta_addr_hash,
        .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
 };
 
@@ -80,8 +78,8 @@ static const struct rhashtable_params sta_rht_params = {
 static int sta_info_hash_del(struct ieee80211_local *local,
                             struct sta_info *sta)
 {
-       return rhashtable_remove_fast(&local->sta_hash, &sta->hash_node,
-                                     sta_rht_params);
+       return rhltable_remove(&local->sta_hash, &sta->hash_node,
+                              sta_rht_params);
 }
 
 static void __cleanup_single_sta(struct sta_info *sta)
@@ -157,19 +155,22 @@ static void cleanup_single_sta(struct sta_info *sta)
        sta_info_free(local, sta);
 }
 
+struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local,
+                                        const u8 *addr)
+{
+       return rhltable_lookup(&local->sta_hash, addr, sta_rht_params);
+}
+
 /* protected by RCU */
 struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
                              const u8 *addr)
 {
        struct ieee80211_local *local = sdata->local;
+       struct rhlist_head *tmp;
        struct sta_info *sta;
-       struct rhash_head *tmp;
-       const struct bucket_table *tbl;
 
        rcu_read_lock();
-       tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
-
-       for_each_sta_info(local, tbl, addr, sta, tmp) {
+       for_each_sta_info(local, addr, sta, tmp) {
                if (sta->sdata == sdata) {
                        rcu_read_unlock();
                        /* this is safe as the caller must already hold
@@ -190,14 +191,11 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
                                  const u8 *addr)
 {
        struct ieee80211_local *local = sdata->local;
+       struct rhlist_head *tmp;
        struct sta_info *sta;
-       struct rhash_head *tmp;
-       const struct bucket_table *tbl;
 
        rcu_read_lock();
-       tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
-
-       for_each_sta_info(local, tbl, addr, sta, tmp) {
+       for_each_sta_info(local, addr, sta, tmp) {
                if (sta->sdata == sdata ||
                    (sta->sdata->bss && sta->sdata->bss == sdata->bss)) {
                        rcu_read_unlock();
@@ -263,8 +261,8 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
 static int sta_info_hash_add(struct ieee80211_local *local,
                             struct sta_info *sta)
 {
-       return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
-                                     sta_rht_params);
+       return rhltable_insert(&local->sta_hash, &sta->hash_node,
+                              sta_rht_params);
 }
 
 static void sta_deliver_ps_frames(struct work_struct *wk)
@@ -453,9 +451,9 @@ static int sta_info_insert_check(struct sta_info *sta)
                    is_multicast_ether_addr(sta->sta.addr)))
                return -EINVAL;
 
-       /* Strictly speaking this isn't necessary as we hold the mutex, but
-        * the rhashtable code can't really deal with that distinction. We
-        * do require the mutex for correctness though.
+       /* The RCU read lock is required by rhashtable due to
+        * asynchronous resize/rehash.  We also require the mutex
+        * for correctness.
         */
        rcu_read_lock();
        lockdep_assert_held(&sdata->local->sta_mtx);
@@ -1043,16 +1041,11 @@ static void sta_info_cleanup(unsigned long data)
                  round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL));
 }
 
-u32 sta_addr_hash(const void *key, u32 length, u32 seed)
-{
-       return jhash(key, ETH_ALEN, seed);
-}
-
 int sta_info_init(struct ieee80211_local *local)
 {
        int err;
 
-       err = rhashtable_init(&local->sta_hash, &sta_rht_params);
+       err = rhltable_init(&local->sta_hash, &sta_rht_params);
        if (err)
                return err;
 
@@ -1068,7 +1061,7 @@ int sta_info_init(struct ieee80211_local *local)
 void sta_info_stop(struct ieee80211_local *local)
 {
        del_timer_sync(&local->sta_cleanup);
-       rhashtable_destroy(&local->sta_hash);
+       rhltable_destroy(&local->sta_hash);
 }
 
 
@@ -1138,17 +1131,14 @@ struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
                                                   const u8 *localaddr)
 {
        struct ieee80211_local *local = hw_to_local(hw);
+       struct rhlist_head *tmp;
        struct sta_info *sta;
-       struct rhash_head *tmp;
-       const struct bucket_table *tbl;
-
-       tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
 
        /*
         * Just return a random station if localaddr is NULL
         * ... first in list.
         */
-       for_each_sta_info(local, tbl, addr, sta, tmp) {
+       for_each_sta_info(local, addr, sta, tmp) {
                if (localaddr &&
                    !ether_addr_equal(sta->sdata->vif.addr, localaddr))
                        continue;
index 530231b..ed5fcb9 100644 (file)
@@ -455,7 +455,7 @@ struct sta_info {
        /* General information, mostly static */
        struct list_head list, free_list;
        struct rcu_head rcu_head;
-       struct rhash_head hash_node;
+       struct rhlist_head hash_node;
        u8 addr[ETH_ALEN];
        struct ieee80211_local *local;
        struct ieee80211_sub_if_data *sdata;
@@ -638,6 +638,9 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
  */
 #define STA_INFO_CLEANUP_INTERVAL (10 * HZ)
 
+struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local,
+                                        const u8 *addr);
+
 /*
  * Get a STA info, must be under RCU read lock.
  */
@@ -647,17 +650,9 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
 struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
                                  const u8 *addr);
 
-u32 sta_addr_hash(const void *key, u32 length, u32 seed);
-
-#define _sta_bucket_idx(_tbl, _a)                                      \
-       rht_bucket_index(_tbl, sta_addr_hash(_a, ETH_ALEN, (_tbl)->hash_rnd))
-
-#define for_each_sta_info(local, tbl, _addr, _sta, _tmp)               \
-       rht_for_each_entry_rcu(_sta, _tmp, tbl,                         \
-                              _sta_bucket_idx(tbl, _addr),             \
-                              hash_node)                               \
-       /* compare address and run code only if it matches */           \
-       if (ether_addr_equal(_sta->addr, (_addr)))
+#define for_each_sta_info(local, _addr, _sta, _tmp)                    \
+       rhl_for_each_entry_rcu(_sta, _tmp,                              \
+                              sta_info_hash_lookup(local, _addr), hash_node)
 
 /*
  * Get STA info by index, BROKEN!
index ea39f8a..ddf71c6 100644 (file)
@@ -746,8 +746,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        __le16 fc;
        struct ieee80211_supported_band *sband;
+       struct rhlist_head *tmp;
        struct sta_info *sta;
-       struct rhash_head *tmp;
        int retry_count;
        int rates_idx;
        bool send_to_cooked;
@@ -755,7 +755,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
        struct ieee80211_bar *bar;
        int shift = 0;
        int tid = IEEE80211_NUM_TIDS;
-       const struct bucket_table *tbl;
 
        rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
 
@@ -764,9 +763,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
        sband = local->hw.wiphy->bands[info->band];
        fc = hdr->frame_control;
 
-       tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
-
-       for_each_sta_info(local, tbl, hdr->addr1, sta, tmp) {
+       for_each_sta_info(local, hdr->addr1, sta, tmp) {
                /* skip wrong virtual interface */
                if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr))
                        continue;
index 7795d5a..87956a7 100644 (file)
@@ -793,6 +793,11 @@ config NET_IFE_SKBPRIO
         depends on NET_ACT_IFE
         ---help---
 
+config NET_IFE_SKBTCINDEX
+        tristate "Support to encoding decoding skb tcindex on IFE action"
+        depends on NET_ACT_IFE
+        ---help---
+
 config NET_CLS_IND
        bool "Incoming device classification"
        depends on NET_CLS_U32 || NET_CLS_FW
index 148ae0d..4bdda36 100644 (file)
@@ -23,6 +23,7 @@ obj-$(CONFIG_NET_ACT_SKBMOD)  += act_skbmod.o
 obj-$(CONFIG_NET_ACT_IFE)      += act_ife.o
 obj-$(CONFIG_NET_IFE_SKBMARK)  += act_meta_mark.o
 obj-$(CONFIG_NET_IFE_SKBPRIO)  += act_meta_skbprio.o
+obj-$(CONFIG_NET_IFE_SKBTCINDEX)       += act_meta_skbtcindex.o
 obj-$(CONFIG_NET_ACT_TUNNEL_KEY)+= act_tunnel_key.o
 obj-$(CONFIG_NET_SCH_FIFO)     += sch_fifo.o
 obj-$(CONFIG_NET_SCH_CBQ)      += sch_cbq.o
index d09d068..d0aceb1 100644 (file)
@@ -592,9 +592,8 @@ err_out:
        return ERR_PTR(err);
 }
 
-int tcf_action_init(struct net *net, struct nlattr *nla,
-                                 struct nlattr *est, char *name, int ovr,
-                                 int bind, struct list_head *actions)
+int tcf_action_init(struct net *net, struct nlattr *nla, struct nlattr *est,
+                   char *name, int ovr, int bind, struct list_head *actions)
 {
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct tc_action *act;
@@ -923,9 +922,8 @@ tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
        return err;
 }
 
-static int
-tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
-              u32 portid, int ovr)
+static int tcf_action_add(struct net *net, struct nlattr *nla,
+                         struct nlmsghdr *n, u32 portid, int ovr)
 {
        int ret = 0;
        LIST_HEAD(actions);
@@ -988,8 +986,7 @@ replay:
        return ret;
 }
 
-static struct nlattr *
-find_dump_kind(const struct nlmsghdr *n)
+static struct nlattr *find_dump_kind(const struct nlmsghdr *n)
 {
        struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
@@ -1016,8 +1013,7 @@ find_dump_kind(const struct nlmsghdr *n)
        return kind;
 }
 
-static int
-tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
+static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net *net = sock_net(skb->sk);
        struct nlmsghdr *nlh;
index b5dbf63..e0defce 100644 (file)
@@ -116,8 +116,8 @@ static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
                return (void *)(skb_network_header(skb) + ihl);
 }
 
-static int tcf_csum_ipv4_icmp(struct sk_buff *skb,
-                             unsigned int ihl, unsigned int ipl)
+static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
+                             unsigned int ipl)
 {
        struct icmphdr *icmph;
 
@@ -152,8 +152,8 @@ static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
        return 1;
 }
 
-static int tcf_csum_ipv6_icmp(struct sk_buff *skb,
-                             unsigned int ihl, unsigned int ipl)
+static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
+                             unsigned int ipl)
 {
        struct icmp6hdr *icmp6h;
        const struct ipv6hdr *ip6h;
@@ -174,8 +174,8 @@ static int tcf_csum_ipv6_icmp(struct sk_buff *skb,
        return 1;
 }
 
-static int tcf_csum_ipv4_tcp(struct sk_buff *skb,
-                            unsigned int ihl, unsigned int ipl)
+static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
+                            unsigned int ipl)
 {
        struct tcphdr *tcph;
        const struct iphdr *iph;
@@ -195,8 +195,8 @@ static int tcf_csum_ipv4_tcp(struct sk_buff *skb,
        return 1;
 }
 
-static int tcf_csum_ipv6_tcp(struct sk_buff *skb,
-                            unsigned int ihl, unsigned int ipl)
+static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
+                            unsigned int ipl)
 {
        struct tcphdr *tcph;
        const struct ipv6hdr *ip6h;
@@ -217,8 +217,8 @@ static int tcf_csum_ipv6_tcp(struct sk_buff *skb,
        return 1;
 }
 
-static int tcf_csum_ipv4_udp(struct sk_buff *skb,
-                            unsigned int ihl, unsigned int ipl, int udplite)
+static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
+                            unsigned int ipl, int udplite)
 {
        struct udphdr *udph;
        const struct iphdr *iph;
@@ -270,8 +270,8 @@ ignore_obscure_skb:
        return 1;
 }
 
-static int tcf_csum_ipv6_udp(struct sk_buff *skb,
-                            unsigned int ihl, unsigned int ipl, int udplite)
+static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
+                            unsigned int ipl, int udplite)
 {
        struct udphdr *udph;
        const struct ipv6hdr *ip6h;
@@ -380,8 +380,8 @@ fail:
        return 0;
 }
 
-static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
-                                unsigned int ixhl, unsigned int *pl)
+static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
+                                unsigned int *pl)
 {
        int off, len, optlen;
        unsigned char *xh = (void *)ip6xh;
@@ -494,8 +494,8 @@ fail:
        return 0;
 }
 
-static int tcf_csum(struct sk_buff *skb,
-                   const struct tc_action *a, struct tcf_result *res)
+static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
+                   struct tcf_result *res)
 {
        struct tcf_csum *p = to_tcf_csum(a);
        int action;
@@ -531,8 +531,8 @@ drop:
        return TC_ACT_SHOT;
 }
 
-static int tcf_csum_dump(struct sk_buff *skb,
-                        struct tc_action *a, int bind, int ref)
+static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+                        int ref)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_csum *p = to_tcf_csum(a);
index e24a409..e0aa30f 100644 (file)
@@ -156,7 +156,8 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
        int action = READ_ONCE(gact->tcf_action);
        struct tcf_t *tm = &gact->tcf_tm;
 
-       _bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), bytes, packets);
+       _bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), bytes,
+                          packets);
        if (action == TC_ACT_SHOT)
                this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
 
index e87cd81..ccf7b4b 100644 (file)
@@ -63,6 +63,23 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
 }
 EXPORT_SYMBOL_GPL(ife_tlv_meta_encode);
 
+int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
+{
+       u16 edata = 0;
+
+       if (mi->metaval)
+               edata = *(u16 *)mi->metaval;
+       else if (metaval)
+               edata = metaval;
+
+       if (!edata) /* will not encode */
+               return 0;
+
+       edata = htons(edata);
+       return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata);
+}
+EXPORT_SYMBOL_GPL(ife_encode_meta_u16);
+
 int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
 {
        if (mi->metaval)
@@ -81,6 +98,15 @@ int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
 }
 EXPORT_SYMBOL_GPL(ife_check_meta_u32);
 
+int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi)
+{
+       if (metaval || mi->metaval)
+               return 8; /* T+L+(V) == 2+2+(2+2bytepad) */
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ife_check_meta_u16);
+
 int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
 {
        u32 edata = metaval;
diff --git a/net/sched/act_meta_skbtcindex.c b/net/sched/act_meta_skbtcindex.c
new file mode 100644 (file)
index 0000000..3b35774
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * net/sched/act_meta_tc_index.c IFE skb->tc_index metadata module
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ *
+ * copyright Jamal Hadi Salim (2016)
+ *
+*/
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <uapi/linux/tc_act/tc_ife.h>
+#include <net/tc_act/tc_ife.h>
+#include <linux/rtnetlink.h>
+
+static int skbtcindex_encode(struct sk_buff *skb, void *skbdata,
+                            struct tcf_meta_info *e)
+{
+       u32 ifetc_index = skb->tc_index;
+
+       return ife_encode_meta_u16(ifetc_index, skbdata, e);
+}
+
+static int skbtcindex_decode(struct sk_buff *skb, void *data, u16 len)
+{
+       u16 ifetc_index = *(u16 *)data;
+
+       skb->tc_index = ntohs(ifetc_index);
+       return 0;
+}
+
+static int skbtcindex_check(struct sk_buff *skb, struct tcf_meta_info *e)
+{
+       return ife_check_meta_u16(skb->tc_index, e);
+}
+
+static struct tcf_meta_ops ife_skbtcindex_ops = {
+       .metaid = IFE_META_TCINDEX,
+       .metatype = NLA_U16,
+       .name = "tc_index",
+       .synopsis = "skb tc_index 16 bit metadata",
+       .check_presence = skbtcindex_check,
+       .encode = skbtcindex_encode,
+       .decode = skbtcindex_decode,
+       .get = ife_get_meta_u16,
+       .alloc = ife_alloc_meta_u16,
+       .release = ife_release_meta_gen,
+       .validate = ife_validate_meta_u16,
+       .owner = THIS_MODULE,
+};
+
+static int __init ifetc_index_init_module(void)
+{
+       return register_ife_op(&ife_skbtcindex_ops);
+}
+
+static void __exit ifetc_index_cleanup_module(void)
+{
+       unregister_ife_op(&ife_skbtcindex_ops);
+}
+
+module_init(ifetc_index_init_module);
+module_exit(ifetc_index_cleanup_module);
+
+MODULE_AUTHOR("Jamal Hadi Salim(2016)");
+MODULE_DESCRIPTION("Inter-FE skb tc_index metadata module");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_IFE_META(IFE_META_SKBTCINDEX);
index 6038c85..1c76387 100644 (file)
@@ -204,7 +204,8 @@ out:
        return retval;
 }
 
-static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
+static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+                          int ref)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_mirred *m = to_mirred(a);
index 8a3be1d..d1bd248 100644 (file)
@@ -249,6 +249,8 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
                        police->tcfp_t_c = now;
                        police->tcfp_toks = toks;
                        police->tcfp_ptoks = ptoks;
+                       if (police->tcfp_result == TC_ACT_SHOT)
+                               police->tcf_qstats.drops++;
                        spin_unlock(&police->tcf_lock);
                        return police->tcfp_result;
                }
@@ -261,8 +263,8 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
        return police->tcf_action;
 }
 
-static int
-tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
+static int tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a,
+                              int bind, int ref)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_police *police = to_police(a);
@@ -347,14 +349,12 @@ static struct pernet_operations police_net_ops = {
        .size = sizeof(struct tc_action_net),
 };
 
-static int __init
-police_init_module(void)
+static int __init police_init_module(void)
 {
        return tcf_register_action(&act_police_ops, &police_net_ops);
 }
 
-static void __exit
-police_cleanup_module(void)
+static void __exit police_cleanup_module(void)
 {
        tcf_unregister_action(&act_police_ops, &police_net_ops);
 }
index a7c5645..11da7da 100644 (file)
@@ -344,13 +344,15 @@ replay:
                        if (err == 0) {
                                struct tcf_proto *next = rtnl_dereference(tp->next);
 
-                               tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
+                               tfilter_notify(net, skb, n, tp, fh,
+                                              RTM_DELTFILTER);
                                if (tcf_destroy(tp, false))
                                        RCU_INIT_POINTER(*back, next);
                        }
                        goto errout;
                case RTM_GETTFILTER:
-                       err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
+                       err = tfilter_notify(net, skb, n, tp, fh,
+                                            RTM_NEWTFILTER);
                        goto errout;
                default:
                        err = -EINVAL;
@@ -448,7 +450,8 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
        struct net *net = sock_net(a->skb->sk);
 
        return tcf_fill_node(net, a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
-                            a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
+                            a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                            RTM_NEWTFILTER);
 }
 
 /* called with RTNL */
@@ -552,7 +555,7 @@ void tcf_exts_destroy(struct tcf_exts *exts)
 EXPORT_SYMBOL(tcf_exts_destroy);
 
 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
-                 struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
+                     struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
 {
 #ifdef CONFIG_NET_CLS_ACT
        {
@@ -560,8 +563,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
 
                if (exts->police && tb[exts->police]) {
                        act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
-                                               "police", ovr,
-                                               TCA_ACT_BIND);
+                                               "police", ovr, TCA_ACT_BIND);
                        if (IS_ERR(act))
                                return PTR_ERR(act);
 
@@ -573,8 +575,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
                        int err, i = 0;
 
                        err = tcf_action_init(net, tb[exts->action], rate_tlv,
-                                             NULL, ovr,
-                                             TCA_ACT_BIND, &actions);
+                                             NULL, ovr, TCA_ACT_BIND,
+                                             &actions);
                        if (err)
                                return err;
                        list_for_each_entry(act, &actions, list)
index 1d92d4d..c6f7a47 100644 (file)
@@ -55,7 +55,8 @@ static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
        [TCA_BPF_CLASSID]       = { .type = NLA_U32 },
        [TCA_BPF_FLAGS]         = { .type = NLA_U32 },
        [TCA_BPF_FD]            = { .type = NLA_U32 },
-       [TCA_BPF_NAME]          = { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN },
+       [TCA_BPF_NAME]          = { .type = NLA_NUL_STRING,
+                                   .len = CLS_BPF_NAME_LEN },
        [TCA_BPF_OPS_LEN]       = { .type = NLA_U16 },
        [TCA_BPF_OPS]           = { .type = NLA_BINARY,
                                    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
@@ -409,7 +410,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                goto errout;
        }
 
-       ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
+       ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE],
+                                     ovr);
        if (ret < 0)
                goto errout;
 
index a379bae..e396723 100644 (file)
@@ -87,12 +87,14 @@ static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
        return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
 }
 
-static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_proto(const struct sk_buff *skb,
+                         const struct flow_keys *flow)
 {
        return flow->basic.ip_proto;
 }
 
-static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_proto_src(const struct sk_buff *skb,
+                             const struct flow_keys *flow)
 {
        if (flow->ports.ports)
                return ntohs(flow->ports.src);
@@ -100,7 +102,8 @@ static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys
        return addr_fold(skb->sk);
 }
 
-static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_proto_dst(const struct sk_buff *skb,
+                             const struct flow_keys *flow)
 {
        if (flow->ports.ports)
                return ntohs(flow->ports.dst);
@@ -149,7 +152,8 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
 })
 #endif
 
-static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_nfct_src(const struct sk_buff *skb,
+                            const struct flow_keys *flow)
 {
        switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
@@ -161,7 +165,8 @@ fallback:
        return flow_get_src(skb, flow);
 }
 
-static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_nfct_dst(const struct sk_buff *skb,
+                            const struct flow_keys *flow)
 {
        switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
@@ -173,14 +178,16 @@ fallback:
        return flow_get_dst(skb, flow);
 }
 
-static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_nfct_proto_src(const struct sk_buff *skb,
+                                  const struct flow_keys *flow)
 {
        return ntohs(CTTUPLE(skb, src.u.all));
 fallback:
        return flow_get_proto_src(skb, flow);
 }
 
-static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb,
+                                  const struct flow_keys *flow)
 {
        return ntohs(CTTUPLE(skb, dst.u.all));
 fallback:
index a3f4c70..2af09c8 100644 (file)
@@ -241,7 +241,8 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
        tc.type = TC_SETUP_CLSFLOWER;
        tc.cls_flower = &offload;
 
-       err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
+       err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
+                                           &tc);
 
        if (tc_skip_sw(flags))
                return err;
index cc0bda9..9dc63d5 100644 (file)
@@ -57,7 +57,7 @@ static u32 fw_hash(u32 handle)
 }
 
 static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
-                         struct tcf_result *res)
+                      struct tcf_result *res)
 {
        struct fw_head *head = rcu_dereference_bh(tp->root);
        struct fw_filter *f;
@@ -188,7 +188,8 @@ static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
 
 static int
 fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
-       struct nlattr **tb, struct nlattr **tca, unsigned long base, bool ovr)
+               struct nlattr **tb, struct nlattr **tca, unsigned long base,
+               bool ovr)
 {
        struct fw_head *head = rtnl_dereference(tp->root);
        struct tcf_exts e;
@@ -237,9 +238,8 @@ errout:
 
 static int fw_change(struct net *net, struct sk_buff *in_skb,
                     struct tcf_proto *tp, unsigned long base,
-                    u32 handle,
-                    struct nlattr **tca,
-                    unsigned long *arg, bool ovr)
+                    u32 handle, struct nlattr **tca, unsigned long *arg,
+                    bool ovr)
 {
        struct fw_head *head = rtnl_dereference(tp->root);
        struct fw_filter *f = (struct fw_filter *) *arg;
index c91e65d..a4ce39b 100644 (file)
@@ -268,8 +268,7 @@ static int route4_init(struct tcf_proto *tp)
        return 0;
 }
 
-static void
-route4_delete_filter(struct rcu_head *head)
+static void route4_delete_filter(struct rcu_head *head)
 {
        struct route4_filter *f = container_of(head, struct route4_filter, rcu);
 
@@ -474,10 +473,8 @@ errout:
 }
 
 static int route4_change(struct net *net, struct sk_buff *in_skb,
-                      struct tcf_proto *tp, unsigned long base,
-                      u32 handle,
-                      struct nlattr **tca,
-                      unsigned long *arg, bool ovr)
+                        struct tcf_proto *tp, unsigned long base, u32 handle,
+                        struct nlattr **tca, unsigned long *arg, bool ovr)
 {
        struct route4_head *head = rtnl_dereference(tp->root);
        struct route4_filter __rcu **fp;
index d950070..96144bd 100644 (file)
@@ -50,14 +50,13 @@ struct tcindex_data {
        struct rcu_head rcu;
 };
 
-static inline int
-tcindex_filter_is_set(struct tcindex_filter_result *r)
+static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
 {
        return tcf_exts_is_predicative(&r->exts) || r->res.classid;
 }
 
-static struct tcindex_filter_result *
-tcindex_lookup(struct tcindex_data *p, u16 key)
+static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
+                                                   u16 key)
 {
        if (p->perfect) {
                struct tcindex_filter_result *f = p->perfect + key;
@@ -144,7 +143,8 @@ static void tcindex_destroy_rexts(struct rcu_head *head)
 
 static void tcindex_destroy_fexts(struct rcu_head *head)
 {
-       struct tcindex_filter *f = container_of(head, struct tcindex_filter, rcu);
+       struct tcindex_filter *f = container_of(head, struct tcindex_filter,
+                                               rcu);
 
        tcf_exts_destroy(&f->result.exts);
        kfree(f);
@@ -550,7 +550,7 @@ static bool tcindex_destroy(struct tcf_proto *tp, bool force)
 
 
 static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
-    struct sk_buff *skb, struct tcmsg *t)
+                       struct sk_buff *skb, struct tcmsg *t)
 {
        struct tcindex_data *p = rtnl_dereference(tp->root);
        struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
index a29263a..ae83c3a 100644 (file)
@@ -104,7 +104,8 @@ static inline unsigned int u32_hash_fold(__be32 key,
        return h;
 }
 
-static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res)
+static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+                       struct tcf_result *res)
 {
        struct {
                struct tc_u_knode *knode;
@@ -256,8 +257,7 @@ deadloop:
        return -1;
 }
 
-static struct tc_u_hnode *
-u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
+static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
 {
        struct tc_u_hnode *ht;
 
@@ -270,8 +270,7 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
        return ht;
 }
 
-static struct tc_u_knode *
-u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
+static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
 {
        unsigned int sel;
        struct tc_u_knode *n = NULL;
@@ -360,8 +359,7 @@ static int u32_init(struct tcf_proto *tp)
        return 0;
 }
 
-static int u32_destroy_key(struct tcf_proto *tp,
-                          struct tc_u_knode *n,
+static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
                           bool free_pf)
 {
        tcf_exts_destroy(&n->exts);
@@ -448,9 +446,8 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
        }
 }
 
-static int u32_replace_hw_hnode(struct tcf_proto *tp,
-                                struct tc_u_hnode *h,
-                                u32 flags)
+static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
+                               u32 flags)
 {
        struct net_device *dev = tp->q->dev_queue->dev;
        struct tc_cls_u32_offload u32_offload = {0};
@@ -496,9 +493,8 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
        }
 }
 
-static int u32_replace_hw_knode(struct tcf_proto *tp,
-                                struct tc_u_knode *n,
-                                u32 flags)
+static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
+                               u32 flags)
 {
        struct net_device *dev = tp->q->dev_queue->dev;
        struct tc_cls_u32_offload u32_offload = {0};
@@ -763,8 +759,7 @@ errout:
        return err;
 }
 
-static void u32_replace_knode(struct tcf_proto *tp,
-                             struct tc_u_common *tp_c,
+static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
                              struct tc_u_knode *n)
 {
        struct tc_u_knode __rcu **ins;
@@ -845,8 +840,7 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
 
 static int u32_change(struct net *net, struct sk_buff *in_skb,
                      struct tcf_proto *tp, unsigned long base, u32 handle,
-                     struct nlattr **tca,
-                     unsigned long *arg, bool ovr)
+                     struct nlattr **tca, unsigned long *arg, bool ovr)
 {
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *ht;
@@ -1088,7 +1082,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 }
 
 static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
-                    struct sk_buff *skb, struct tcmsg *t)
+                   struct sk_buff *skb, struct tcmsg *t)
 {
        struct tc_u_knode *n = (struct tc_u_knode *)fh;
        struct tc_u_hnode *ht_up, *ht_down;
index d677b34..206dc24 100644 (file)
@@ -389,7 +389,8 @@ static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
 
 static struct qdisc_rate_table *qdisc_rtab_list;
 
-struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
+struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
+                                       struct nlattr *tab)
 {
        struct qdisc_rate_table *rtab;
 
@@ -541,7 +542,8 @@ nla_put_failure:
        return -1;
 }
 
-void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
+void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+                              const struct qdisc_size_table *stab)
 {
        int pkt_len, slot;
 
@@ -888,10 +890,10 @@ static struct lock_class_key qdisc_rx_lock;
    Parameters are passed via opt.
  */
 
-static struct Qdisc *
-qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
-            struct Qdisc *p, u32 parent, u32 handle,
-            struct nlattr **tca, int *errp)
+static struct Qdisc *qdisc_create(struct net_device *dev,
+                                 struct netdev_queue *dev_queue,
+                                 struct Qdisc *p, u32 parent, u32 handle,
+                                 struct nlattr **tca, int *errp)
 {
        int err;
        struct nlattr *kind = tca[TCA_KIND];
@@ -1073,7 +1075,8 @@ struct check_loop_arg {
        int                     depth;
 };
 
-static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
+static int check_loop_fn(struct Qdisc *q, unsigned long cl,
+                        struct qdisc_walker *w);
 
 static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
 {
@@ -1450,7 +1453,8 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
        } else {
                if (!tc_qdisc_dump_ignore(q) &&
                    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
-                                 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
+                                 cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                                 RTM_NEWQDISC) <= 0)
                        goto done;
                q_idx++;
        }
@@ -1471,7 +1475,8 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
                }
                if (!tc_qdisc_dump_ignore(q) &&
                    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
-                                 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
+                                 cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                                 RTM_NEWQDISC) <= 0)
                        goto done;
                q_idx++;
        }
@@ -1505,7 +1510,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
                        s_q_idx = 0;
                q_idx = 0;
 
-               if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx, true) < 0)
+               if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
+                                      true) < 0)
                        goto done;
 
                dev_queue = dev_ingress_queue(dev);
@@ -1640,7 +1646,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
                        if (cops->delete)
                                err = cops->delete(q, cl);
                        if (err == 0)
-                               tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
+                               tclass_notify(net, skb, n, q, cl,
+                                             RTM_DELTCLASS);
                        goto out;
                case RTM_GETTCLASS:
                        err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
@@ -1738,12 +1745,14 @@ struct qdisc_dump_args {
        struct netlink_callback *cb;
 };
 
-static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
+static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
+                           struct qdisc_walker *arg)
 {
        struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
 
        return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
-                             a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
+                             a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                             RTM_NEWTCLASS);
 }
 
 static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
@@ -1976,10 +1985,12 @@ static int __init pktsched_init(void)
 
        rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
        rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
-       rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
+       rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
+                     NULL);
        rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
        rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
-       rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);
+       rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
+                     NULL);
 
        return 0;
 }