Merge tag 'mac80211-next-for-davem-2016-09-16' of git://git.kernel.org/pub/scm/linux...
author David S. Miller <davem@davemloft.net>
Mon, 19 Sep 2016 02:29:08 +0000 (22:29 -0400)
committer David S. Miller <davem@davemloft.net>
Mon, 19 Sep 2016 02:29:08 +0000 (22:29 -0400)
Johannes Berg says:

====================
This time we have various things - all across the board:
 * MU-MIMO sniffer support in mac80211
 * a create_singlethread_workqueue() cleanup
 * interface dump filtering that was documented but not implemented
 * support for the new radiotap timestamp field
 * send delBA in two unexpected conditions (as required by the spec)
 * connect keys cleanups - allow only WEP with index 0-3
 * per-station aggregation limit to work around broken APs
 * debugfs improvement for the integrated codel algorithm
and various other small improvements and cleanups.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
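
As an illustration of the "connect keys cleanups - allow only WEP with index 0-3"
item in the summary above, here is a minimal standalone sketch of that kind of
validation. It is not the cfg80211/mac80211 code contained in this merge; the
struct and helper names (conn_key, is_wep_cipher, connect_key_ok) are assumptions
made for the example, although the WLAN_CIPHER_SUITE_* selector values are the
standard ones.

/*
 * Illustrative sketch only -- NOT the actual code from this merge.
 * It shows the kind of check implied by "allow only WEP with index 0-3":
 * connect-time keys must be WEP (40- or 104-bit) and use key index 0..3.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

#define NUM_WEP_KEYS 4

#define WLAN_CIPHER_SUITE_WEP40  0x000FAC01
#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05

struct conn_key {
	uint32_t cipher;   /* cipher suite selector */
	uint8_t  idx;      /* key index requested by userspace */
	size_t   key_len;  /* key material length in bytes */
};

static bool is_wep_cipher(uint32_t cipher)
{
	return cipher == WLAN_CIPHER_SUITE_WEP40 ||
	       cipher == WLAN_CIPHER_SUITE_WEP104;
}

/* Return true if a connect-time key is acceptable under the new rules. */
static bool connect_key_ok(const struct conn_key *key)
{
	if (!is_wep_cipher(key->cipher))
		return false;            /* only WEP may be a connect key */
	if (key->idx >= NUM_WEP_KEYS)
		return false;            /* key index must be 0..3 */
	if (key->key_len == 0)
		return false;            /* empty slots carry no key */
	return true;
}

In the real stack such a check would run when userspace supplies connect-time
keys, rejecting any key that is not WEP or whose index falls outside 0-3.
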
728 files changed:
.mailmap
Documentation/ABI/stable/sysfs-devices
Documentation/PCI/pci.txt
Documentation/arm/CCN.txt
Documentation/cpu-freq/cpufreq-stats.txt
Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
Documentation/devicetree/bindings/net/dsa/qca8k.txt [new file with mode: 0644]
Documentation/devicetree/bindings/serial/8250.txt
Documentation/filesystems/overlayfs.txt
Documentation/i2c/slave-interface
Documentation/rapidio/mport_cdev.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/arm/boot/dts/am335x-baltos.dtsi
arch/arm/boot/dts/am335x-igep0033.dtsi
arch/arm/boot/dts/am335x-phycore-som.dtsi
arch/arm/boot/dts/armada-388-clearfog.dts
arch/arm/boot/dts/exynos5410-odroidxu.dts
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6sx-sabreauto.dts
arch/arm/boot/dts/imx7d-sdb.dts
arch/arm/boot/dts/kirkwood-ib62x0.dts
arch/arm/boot/dts/kirkwood-openrd.dtsi
arch/arm/boot/dts/logicpd-som-lv.dtsi
arch/arm/boot/dts/logicpd-torpedo-som.dtsi
arch/arm/boot/dts/omap3-overo-base.dtsi
arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
arch/arm/boot/dts/rk3066a.dtsi
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/rk3xxx.dtsi
arch/arm/boot/dts/sun5i-a13.dtsi
arch/arm/boot/dts/tegra114-dalmore.dts
arch/arm/boot/dts/tegra114-roth.dts
arch/arm/boot/dts/tegra114-tn7.dts
arch/arm/boot/dts/tegra124-jetson-tk1.dts
arch/arm/kernel/hyp-stub.S
arch/arm/mach-imx/mach-imx6ul.c
arch/arm/mach-imx/pm-imx6.c
arch/arm/mach-omap2/cm33xx.c
arch/arm/mach-omap2/cminst44xx.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod.h
arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-sa1100/clock.c
arch/arm/mach-sa1100/generic.c
arch/arm/mach-sa1100/generic.h
arch/arm/mm/proc-v7.S
arch/arm64/boot/dts/rockchip/rk3368.dtsi
arch/arm64/include/asm/percpu.h
arch/arm64/include/asm/spinlock.h
arch/arm64/mm/proc.S
arch/ia64/include/asm/uaccess.h
arch/parisc/Kconfig
arch/parisc/configs/c8000_defconfig
arch/parisc/configs/generic-64bit_defconfig
arch/parisc/include/asm/uaccess.h
arch/powerpc/include/asm/uaccess.h
arch/powerpc/lib/checksum_32.S
arch/powerpc/mm/slb_low.S
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/sysdev/xics/icp-opal.c
arch/s390/Kconfig
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/defconfig
arch/s390/include/asm/uaccess.h
arch/sparc/include/asm/uaccess_32.h
arch/sparc/include/asm/uaccess_64.h
arch/tile/Kconfig
arch/tile/include/asm/uaccess.h
arch/um/kernel/skas/syscall.c
arch/x86/Kconfig
arch/x86/configs/tiny.config
arch/x86/include/asm/uaccess.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/paravirt.c
arch/x86/mm/pat.c
arch/x86/um/ptrace_32.c
arch/x86/um/ptrace_64.c
crypto/cryptd.c
drivers/acpi/nfit/mce.c
drivers/acpi/scan.c
drivers/ata/libahci.c
drivers/ata/pata_ninja32.c
drivers/atm/iphase.c
drivers/base/power/runtime.c
drivers/base/regmap/regcache-rbtree.c
drivers/base/regmap/regcache.c
drivers/base/regmap/regmap.c
drivers/bcma/driver_chipcommon.c
drivers/bcma/main.c
drivers/bus/arm-cci.c
drivers/bus/arm-ccn.c
drivers/bus/vexpress-config.c
drivers/char/hw_random/Kconfig
drivers/char/tpm/tpm2-cmd.c
drivers/char/virtio_console.c
drivers/clk/renesas/r8a7795-cpg-mssr.c
drivers/clk/rockchip/clk-rk3399.c
drivers/clk/sunxi-ng/ccu_common.c
drivers/clk/tegra/clk-tegra114.c
drivers/clocksource/timer-atmel-pit.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/crypto/caam/caamalg.c
drivers/crypto/qat/qat_common/qat_algs.c
drivers/crypto/vmx/aes_xts.c
drivers/dax/dax.c
drivers/dma/at_xdmac.c
drivers/dma/fsl_raid.c
drivers/dma/img-mdc-dma.c
drivers/dma/pxa_dma.c
drivers/dma/sh/usb-dmac.c
drivers/firmware/arm_scpi.c
drivers/firmware/dmi-id.c
drivers/gpio/Kconfig
drivers/gpio/gpio-mcp23s08.c
drivers/gpio/gpio-sa1100.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_drv.h
drivers/gpu/drm/vc4/vc4_gem.c
drivers/gpu/drm/vc4/vc4_irq.c
drivers/i2c/busses/i2c-bcm-kona.c
drivers/i2c/busses/i2c-cadence.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-rk3x.c
drivers/i2c/busses/i2c-sh_mobile.c
drivers/i2c/muxes/i2c-demux-pinctrl.c
drivers/iio/accel/Kconfig
drivers/iio/accel/bma220_spi.c
drivers/iio/accel/bmc150-accel-core.c
drivers/iio/accel/kxsd9.c
drivers/iio/adc/Kconfig
drivers/iio/adc/ad799x.c
drivers/iio/adc/at91_adc.c
drivers/iio/adc/rockchip_saradc.c
drivers/iio/adc/ti-ads1015.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/chemical/atlas-ph-sensor.c
drivers/iio/common/hid-sensors/hid-sensor-attributes.c
drivers/iio/dac/stx104.c
drivers/iio/humidity/Kconfig
drivers/iio/humidity/am2315.c
drivers/iio/humidity/hdc100x.c
drivers/iio/industrialio-buffer.c
drivers/iio/industrialio-core.c
drivers/iio/light/Kconfig
drivers/iio/pressure/bmp280-core.c
drivers/iio/proximity/as3935.c
drivers/infiniband/core/multicast.c
drivers/infiniband/hw/cxgb4/Kconfig
drivers/infiniband/hw/cxgb4/Makefile
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/chip.h
drivers/infiniband/hw/hfi1/debugfs.c
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/mad.c
drivers/infiniband/hw/hfi1/pio_copy.c
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/i40iw/i40iw_hw.c
drivers/infiniband/hw/i40iw/i40iw_main.c
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mem.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/mailbox/Kconfig
drivers/mailbox/bcm-pdc-mailbox.c
drivers/md/bitmap.c
drivers/md/dm-bufio.c
drivers/md/dm-crypt.c
drivers/md/dm-log-writes.c
drivers/md/md-cluster.c
drivers/md/md.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/memory/omap-gpmc.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/bh1780gli.c [deleted file]
drivers/misc/lkdtm_rodata.c
drivers/misc/lkdtm_usercopy.c
drivers/misc/mei/hw-me.c
drivers/misc/mei/pci-me.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/Kconfig
drivers/net/dsa/Makefile
drivers/net/dsa/b53/b53_priv.h
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/qca8k.c [new file with mode: 0644]
drivers/net/dsa/qca8k.h [new file with mode: 0644]
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/adaptec/starfire.c
drivers/net/ethernet/amd/7990.c
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.h
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cavium/thunder/nic.h
drivers/net/ethernet/cavium/thunder/nic_main.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/chelsio/libcxgb/Makefile
drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c [new file with mode: 0644]
drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h [new file with mode: 0644]
drivers/net/ethernet/dlink/sundance.c
drivers/net/ethernet/freescale/fman/fman_mac.h
drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
drivers/net/ethernet/freescale/fs_enet/mac-fec.c
drivers/net/ethernet/freescale/fs_enet/mac-scc.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/i825xx/82596.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/mal.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/marvell/mvneta_bm.h
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/natsemi/ns83820.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qualcomm/emac/emac.c
drivers/net/ethernet/rdc/r6040.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/falcon_boards.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/sis/sis900.h
drivers/net/ethernet/smsc/smc91x.h
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
drivers/net/ethernet/synopsys/dwc_eth_qos.c
drivers/net/ethernet/toshiba/ps3_gelic_net.c
drivers/net/hamradio/bpqether.c
drivers/net/phy/Kconfig
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/ahb.c
drivers/net/wireless/ath/ath10k/bmi.c
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/htc.c
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/htt_tx.c
drivers/net/wireless/ath/ath10k/hw.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/pci.h
drivers/net/wireless/ath/ath10k/swap.c
drivers/net/wireless/ath/ath10k/swap.h
drivers/net/wireless/ath/ath10k/testmode.c
drivers/net/wireless/ath/ath10k/thermal.c
drivers/net/wireless/ath/ath10k/txrx.c
drivers/net/wireless/ath/ath10k/wmi-ops.h
drivers/net/wireless/ath/ath10k/wmi-tlv.c
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath5k/debug.c
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath6kl/hif.c
drivers/net/wireless/ath/ath9k/ar5008_phy.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/gpio.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/usb.c
drivers/net/wireless/ath/dfs_pattern_detector.c
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/fw.h
drivers/net/wireless/ath/wil6210/fw_inc.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/netdev.c
drivers/net/wireless/ath/wil6210/p2p.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/ath/wil6210/wmi.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
drivers/net/wireless/intel/iwlegacy/3945.c
drivers/net/wireless/intel/iwlegacy/common.h
drivers/net/wireless/intel/iwlwifi/iwl-9000.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/intersil/hostap/hostap_ioctl.c
drivers/net/wireless/marvell/mwifiex/11h.c
drivers/net/wireless/marvell/mwifiex/11n.h
drivers/net/wireless/marvell/mwifiex/11n_aggr.c
drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
drivers/net/wireless/marvell/mwifiex/cfg80211.c
drivers/net/wireless/marvell/mwifiex/cmdevt.c
drivers/net/wireless/marvell/mwifiex/debugfs.c
drivers/net/wireless/marvell/mwifiex/fw.h
drivers/net/wireless/marvell/mwifiex/init.c
drivers/net/wireless/marvell/mwifiex/join.c
drivers/net/wireless/marvell/mwifiex/main.c
drivers/net/wireless/marvell/mwifiex/main.h
drivers/net/wireless/marvell/mwifiex/pcie.c
drivers/net/wireless/marvell/mwifiex/pcie.h
drivers/net/wireless/marvell/mwifiex/scan.c
drivers/net/wireless/marvell/mwifiex/sdio.c
drivers/net/wireless/marvell/mwifiex/sta_cmd.c
drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
drivers/net/wireless/marvell/mwifiex/sta_event.c
drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
drivers/net/wireless/marvell/mwifiex/uap_event.c
drivers/net/wireless/marvell/mwifiex/usb.c
drivers/net/wireless/marvell/mwifiex/usb.h
drivers/net/wireless/marvell/mwifiex/util.c
drivers/net/wireless/mediatek/mt7601u/dma.c
drivers/net/wireless/mediatek/mt7601u/dma.h
drivers/net/wireless/mediatek/mt7601u/eeprom.c
drivers/net/wireless/mediatek/mt7601u/init.c
drivers/net/wireless/mediatek/mt7601u/mac.c
drivers/net/wireless/mediatek/mt7601u/main.c
drivers/net/wireless/mediatek/mt7601u/mcu.c
drivers/net/wireless/mediatek/mt7601u/mt7601u.h
drivers/net/wireless/mediatek/mt7601u/phy.c
drivers/net/wireless/mediatek/mt7601u/regs.h
drivers/net/wireless/mediatek/mt7601u/tx.c
drivers/net/wireless/mediatek/mt7601u/util.h [deleted file]
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/net/wireless/realtek/rtlwifi/regd.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
drivers/net/wireless/ti/wl18xx/acx.c
drivers/net/wireless/ti/wl18xx/acx.h
drivers/net/wireless/ti/wl18xx/event.c
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/spi.c
drivers/net/wireless/ti/wlcore/wlcore.h
drivers/net/wireless/ti/wlcore/wlcore_i.h
drivers/net/wireless/wl3501_cs.c
drivers/net/wireless/zydas/zd1211rw/zd_usb.c
drivers/nvdimm/bus.c
drivers/nvme/host/Kconfig
drivers/nvme/host/fabrics.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/rdma.c
drivers/nvme/target/Kconfig
drivers/nvme/target/loop.c
drivers/nvme/target/rdma.c
drivers/pci/quirks.c
drivers/perf/arm_pmu.c
drivers/phy/phy-brcm-sata.c
drivers/phy/phy-sun4i-usb.c
drivers/phy/phy-sun9i-usb.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/pinctrl-pistachio.c
drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
drivers/rapidio/devices/tsi721.c
drivers/regulator/max14577-regulator.c
drivers/regulator/max77693-regulator.c
drivers/regulator/qcom_smd-regulator.c
drivers/scsi/constants.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_transport_sas.c
drivers/scsi/ses.c
drivers/scsi/wd719x.c
drivers/spi/spi-img-spfi.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-pxa2xx-pci.c
drivers/spi/spi-qup.c
drivers/spi/spi-sh-msiof.c
drivers/spi/spi.c
drivers/staging/comedi/drivers/adv_pci1760.c
drivers/staging/comedi/drivers/comedi_test.c
drivers/staging/comedi/drivers/daqboard2000.c
drivers/staging/comedi/drivers/dt2811.c
drivers/staging/comedi/drivers/ni_mio_common.c
drivers/staging/iio/impedance-analyzer/ad5933.c
drivers/staging/lustre/lustre/llite/namei.c
drivers/staging/wilc1000/host_interface.c
drivers/staging/wilc1000/linux_wlan.c
drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
drivers/target/iscsi/cxgbit/cxgbit_cm.c
drivers/thermal/rcar_thermal.c
drivers/thunderbolt/nhi.c
drivers/thunderbolt/switch.c
drivers/tty/serial/8250/8250.h
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_fintek.c
drivers/tty/serial/8250/8250_mid.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/8250/Kconfig
drivers/usb/chipidea/udc.c
drivers/usb/core/config.c
drivers/usb/core/devio.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/debug.h
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_eem.c
drivers/usb/gadget/function/f_rndis.c
drivers/usb/gadget/function/u_serial.c
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/fsl_qe_udc.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/xhci-ring.c
drivers/usb/musb/musb_virthub.c
drivers/usb/phy/phy-generic.c
drivers/usb/renesas_usbhs/mod.c
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/serial/mos7720.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/option.c
drivers/virtio/virtio_ring.c
fs/binfmt_elf.c
fs/btrfs/ctree.h
fs/btrfs/extent-tree.c
fs/btrfs/relocation.c
fs/btrfs/send.c
fs/btrfs/tree-log.c
fs/ceph/dir.c
fs/crypto/policy.c
fs/devpts/inode.c
fs/ext4/ioctl.c
fs/f2fs/file.c
fs/fuse/file.c
fs/iomap.c
fs/kernfs/file.c
fs/nfs/blocklayout/blocklayout.c
fs/nfs/blocklayout/blocklayout.h
fs/nfs/blocklayout/extent_tree.c
fs/nfs/callback.c
fs/nfs/callback_proc.c
fs/nfs/client.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/flexfilelayout/flexfilelayout.h
fs/nfs/flexfilelayout/flexfilelayoutdev.c
fs/nfs/internal.h
fs/nfs/nfs42proc.c
fs/nfs/nfs4client.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4session.c
fs/nfs/nfs4session.h
fs/nfs/pnfs.c
fs/nfs/super.c
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/overlayfs.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/proc/base.c
fs/proc/task_mmu.c
fs/sysfs/file.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_btree.c
fs/xfs/libxfs/xfs_defer.c
fs/xfs/libxfs/xfs_defer.h
fs/xfs/libxfs/xfs_format.h
fs/xfs/libxfs/xfs_sb.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_super.c
fs/xfs/xfs_trace.h
include/asm-generic/uaccess.h
include/linux/acpi.h
include/linux/bitfield.h [new file with mode: 0644]
include/linux/bug.h
include/linux/compiler-gcc.h
include/linux/fence.h
include/linux/fs.h
include/linux/fscrypto.h
include/linux/iio/sw_trigger.h
include/linux/iomap.h
include/linux/mempolicy.h
include/linux/mfd/da8xx-cfgchip.h [new file with mode: 0644]
include/linux/mfd/ti_am335x_tscadc.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/mmc/sdio_ids.h
include/linux/mmzone.h
include/linux/netdevice.h
include/linux/nvme.h
include/linux/pci.h
include/linux/serial_8250.h
include/linux/thread_info.h
include/net/af_unix.h
include/net/cfg80211.h
include/net/dsa.h
include/net/ip6_tunnel.h
include/net/ip_fib.h
include/net/ip_tunnels.h
include/net/l3mdev.h
include/net/netfilter/nft_meta.h
include/net/netfilter/nft_reject.h
include/net/sctp/structs.h
include/net/tc_act/tc_skbmod.h [new file with mode: 0644]
include/scsi/scsi_transport_sas.h
include/uapi/linux/if_tunnel.h
include/uapi/linux/pkt_cls.h
include/uapi/linux/tc_act/tc_skbmod.h [new file with mode: 0644]
kernel/audit_watch.c
kernel/configs/tiny.config
kernel/cpuset.c
kernel/exit.c
kernel/fork.c
kernel/kexec_file.c
kernel/memremap.c
kernel/power/qos.c
kernel/printk/nmi.c
kernel/seccomp.c
kernel/time/tick-sched.c
lib/Kconfig.debug
lib/Makefile
lib/test_bpf.c
lib/test_hash.c
lib/usercopy.c [deleted file]
mm/huge_memory.c
mm/mempolicy.c
mm/page_alloc.c
mm/usercopy.c
mm/vmscan.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_stp_if.c
net/bridge/netfilter/ebtables.c
net/bridge/netfilter/nft_meta_bridge.c
net/core/dev.c
net/core/flow_dissector.c
net/dsa/Kconfig
net/dsa/Makefile
net/dsa/dsa.c
net/dsa/dsa_priv.h
net/dsa/tag_qca.c [new file with mode: 0644]
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/ip_tunnel.c
net/ipv4/ipip.c
net/ipv4/netfilter/nft_reject_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_yeah.c
net/ipv4/xfrm4_policy.c
net/ipv6/addrconf.c
net/ipv6/ip6_tunnel.c
net/ipv6/netfilter/nft_reject_ipv6.c
net/ipv6/ping.c
net/ipv6/xfrm6_input.c
net/ipv6/xfrm6_policy.c
net/kcm/kcmsock.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_eth.c
net/llc/af_llc.c
net/mac80211/tdls.c
net/netfilter/nf_tables_netdev.c
net/netfilter/nfnetlink_acct.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nft_meta.c
net/netfilter/nft_reject.c
net/netfilter/nft_reject_inet.c
net/openvswitch/actions.c
net/openvswitch/flow.c
net/openvswitch/flow.h
net/openvswitch/flow_table.c
net/rxrpc/Kconfig
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_event.c
net/rxrpc/call_object.c
net/rxrpc/conn_object.c
net/rxrpc/input.c
net/rxrpc/local_event.c
net/rxrpc/local_object.c
net/rxrpc/misc.c
net/rxrpc/output.c
net/rxrpc/peer_event.c
net/rxrpc/peer_object.c
net/rxrpc/proc.c
net/rxrpc/recvmsg.c
net/rxrpc/sysctl.c
net/rxrpc/utils.c
net/sched/Kconfig
net/sched/Makefile
net/sched/act_bpf.c
net/sched/act_skbmod.c [new file with mode: 0644]
net/sched/act_tunnel_key.c
net/sched/cls_bpf.c
net/sched/cls_flower.c
net/sched/sch_fq.c
net/sctp/chunk.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/sunrpc/clnt.c
net/tipc/name_distr.c
net/tipc/udp_media.c
net/unix/af_unix.c
net/wireless/wext-core.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
samples/bpf/tcbpf2_kern.c
samples/bpf/test_ipip.sh [new file with mode: 0755]
samples/bpf/test_tunnel_bpf.sh
scripts/checkpatch.pl
scripts/package/builddeb
scripts/tags.sh
security/Kconfig
sound/core/rawmidi.c
sound/core/timer.c
sound/firewire/fireworks/fireworks.h
sound/firewire/fireworks/fireworks_hwdep.c
sound/firewire/fireworks/fireworks_proc.c
sound/firewire/fireworks/fireworks_transaction.c
sound/firewire/tascam/tascam-hwdep.c
sound/pci/hda/patch_realtek.c
sound/usb/quirks.c
tools/iio/iio_generic_buffer.c

index 2a91c14..de22dae 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -88,6 +88,7 @@ Kay Sievers <kay.sievers@vrfy.org>
 Kenneth W Chen <kenneth.w.chen@intel.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
+Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
 Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
@@ -158,6 +159,8 @@ Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
index 43f78b8..df449d7 100644 (file)
@@ -1,7 +1,7 @@
 # Note: This documents additional properties of any device beyond what
 # is documented in Documentation/sysfs-rules.txt
 
-What:          /sys/devices/*/of_path
+What:          /sys/devices/*/of_node
 Date:          February 2015
 Contact:       Device Tree mailing list <devicetree@vger.kernel.org>
 Description:
index 123881f..77f49dc 100644 (file)
@@ -124,7 +124,6 @@ initialization with a pointer to a structure describing the driver
 
 The ID table is an array of struct pci_device_id entries ending with an
 all-zero entry.  Definitions with static const are generally preferred.
-Use of the deprecated macro DEFINE_PCI_DEVICE_TABLE should be avoided.
 
 Each entry consists of:
 
index ffca443..15cdb7b 100644 (file)
@@ -18,13 +18,17 @@ and config2 fields of the perf_event_attr structure. The "events"
 directory provides configuration templates for all documented
 events, that can be used with perf tool. For example "xp_valid_flit"
 is an equivalent of "type=0x8,event=0x4". Other parameters must be
-explicitly specified. For events originating from device, "node"
-defines its index. All crosspoint events require "xp" (index),
-"port" (device port number) and "vc" (virtual channel ID) and
-"dir" (direction). Watchpoints (special "event" value 0xfe) also
-require comparator values ("cmp_l" and "cmp_h") and "mask", being
-index of the comparator mask.
+explicitly specified.
 
+For events originating from device, "node" defines its index.
+
+Crosspoint PMU events require "xp" (index), "bus" (bus number)
+and "vc" (virtual channel ID).
+
+Crosspoint watchpoint-based events (special "event" value 0xfe)
+require "xp" and "vc" as as above plus "port" (device port index),
+"dir" (transmit/receive direction), comparator values ("cmp_l"
+and "cmp_h") and "mask", being index of the comparator mask.
 Masks are defined separately from the event description
 (due to limited number of the config values) in the "cmp_mask"
 directory, with first 8 configurable by user and additional
index fc64749..8d9773f 100644 (file)
@@ -103,7 +103,7 @@ Config Main Menu
        Power management options (ACPI, APM)  --->
                CPU Frequency scaling  --->
                        [*] CPU Frequency scaling
-                       <*>   CPU frequency translation statistics 
+                       [*]   CPU frequency translation statistics
                        [*]     CPU frequency translation statistics details
 
 
index bf99e2f..205593f 100644 (file)
@@ -16,6 +16,11 @@ Required properties:
 - vref-supply: The regulator supply ADC reference voltage.
 - #io-channel-cells: Should be 1, see ../iio-bindings.txt
 
+Optional properties:
+- resets: Must contain an entry for each entry in reset-names if support for
+         this option is needed. See ../reset/reset.txt for details.
+- reset-names: Must include the name "saradc-apb".
+
 Example:
        saradc: saradc@2006c000 {
                compatible = "rockchip,saradc";
@@ -23,6 +28,8 @@ Example:
                interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
                clock-names = "saradc", "apb_pclk";
+               resets = <&cru SRST_SARADC>;
+               reset-names = "saradc-apb";
                #io-channel-cells = <1>;
                vref-supply = <&vcc18>;
        };
diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.txt b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
new file mode 100644 (file)
index 0000000..9c67ee4
--- /dev/null
@@ -0,0 +1,89 @@
+* Qualcomm Atheros QCA8xxx switch family
+
+Required properties:
+
+- compatible: should be "qca,qca8337"
+- #size-cells: must be 0
+- #address-cells: must be 1
+
+Subnodes:
+
+The integrated switch subnode should be specified according to the binding
+described in dsa/dsa.txt. As the QCA8K switches do not have an N:N mapping of
+port and PHY id, each subnode describing a port needs to have a valid phandle
+referencing the internal PHY connected to it. The CPU port of this switch is
+always port 0.
+
+Example:
+
+
+       &mdio0 {
+               phy_port1: phy@0 {
+                       reg = <0>;
+               };
+
+               phy_port2: phy@1 {
+                       reg = <1>;
+               };
+
+               phy_port3: phy@2 {
+                       reg = <2>;
+               };
+
+               phy_port4: phy@3 {
+                       reg = <3>;
+               };
+
+               phy_port5: phy@4 {
+                       reg = <4>;
+               };
+
+               switch0@0 {
+                       compatible = "qca,qca8337";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       reg = <0>;
+
+                       ports {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               port@0 {
+                                       reg = <0>;
+                                       label = "cpu";
+                                       ethernet = <&gmac1>;
+                                       phy-mode = "rgmii";
+                               };
+
+                               port@1 {
+                                       reg = <1>;
+                                       label = "lan1";
+                                       phy-handle = <&phy_port1>;
+                               };
+
+                               port@2 {
+                                       reg = <2>;
+                                       label = "lan2";
+                                       phy-handle = <&phy_port2>;
+                               };
+
+                               port@3 {
+                                       reg = <3>;
+                                       label = "lan3";
+                                       phy-handle = <&phy_port3>;
+                               };
+
+                               port@4 {
+                                       reg = <4>;
+                                       label = "lan4";
+                                       phy-handle = <&phy_port4>;
+                               };
+
+                               port@5 {
+                                       reg = <5>;
+                                       label = "wan";
+                                       phy-handle = <&phy_port5>;
+                               };
+                       };
+               };
+       };
index f5561ac..936ab5b 100644 (file)
@@ -42,9 +42,6 @@ Optional properties:
 - auto-flow-control: one way to enable automatic flow control support. The
   driver is allowed to detect support for the capability even without this
   property.
-- {rts,cts,dtr,dsr,rng,dcd}-gpios: specify a GPIO for RTS/CTS/DTR/DSR/RI/DCD
-  line respectively. It will use specified GPIO instead of the peripheral
-  function pin for the UART feature. If unsure, don't specify this property.
 
 Note:
 * fsl,ns16550:
@@ -66,19 +63,3 @@ Example:
                interrupts = <10>;
                reg-shift = <2>;
        };
-
-Example for OMAP UART using GPIO-based modem control signals:
-
-       uart4: serial@49042000 {
-               compatible = "ti,omap3-uart";
-               reg = <0x49042000 0x400>;
-               interrupts = <80>;
-               ti,hwmods = "uart4";
-               clock-frequency = <48000000>;
-               cts-gpios = <&gpio3 5 GPIO_ACTIVE_LOW>;
-               rts-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
-               dtr-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
-               dsr-gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
-               dcd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
-               rng-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
-       };
index d6259c7..bcbf971 100644 (file)
@@ -183,12 +183,10 @@ The copy_up operation essentially creates a new, identical file and
 moves it over to the old name.  The new file may be on a different
 filesystem, so both st_dev and st_ino of the file may change.
 
-Any open files referring to this inode will access the old data and
-metadata.  Similarly any file locks obtained before copy_up will not
-apply to the copied up file.
+Any open files referring to this inode will access the old data.
 
-On a file opened with O_RDONLY fchmod(2), fchown(2), futimesat(2) and
-fsetxattr(2) will fail with EROFS.
+Any file locks (and leases) obtained before copy_up will not apply
+to the copied up file.
 
 If a file with multiple hard links is copied up, then this will
 "break" the link.  Changes will not be propagated to other names
index 80807ad..7e2a228 100644 (file)
@@ -145,6 +145,11 @@ If you want to add slave support to the bus driver:
 
 * Catch the slave interrupts and send appropriate i2c_slave_events to the backend.
 
+Note that most hardware supports being master _and_ slave on the same bus. So,
+if you extend a bus driver, please make sure that the driver supports that as
+well. In almost all cases, slave support does not need to disable the master
+functionality.
+
 Check the i2c-rcar driver as an example.
 
 
index 6e491a6..a53f786 100644 (file)
@@ -80,6 +80,10 @@ functionality of their platform when planning to use this driver:
 
 III. Module parameters
 
+- 'dma_timeout' - DMA transfer completion timeout (in msec, default value 3000).
+        This parameter sets a maximum completion wait time for SYNC mode DMA
+        transfer requests and for RIO_WAIT_FOR_ASYNC ioctl requests.
+
 - 'dbg_level' - This parameter allows to control amount of debug information
         generated by this device driver. This parameter is formed by set of
         bit masks that correspond to the specific functional blocks.
index 9a17835..ce80b36 100644 (file)
@@ -807,6 +807,7 @@ M:  Laura Abbott <labbott@redhat.com>
 M:     Sumit Semwal <sumit.semwal@linaro.org>
 L:     devel@driverdev.osuosl.org
 S:     Supported
+F:     Documentation/devicetree/bindings/staging/ion/
 F:     drivers/staging/android/ion
 F:     drivers/staging/android/uapi/ion.h
 F:     drivers/staging/android/uapi/ion_test.h
@@ -1632,7 +1633,7 @@ N:        rockchip
 
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:     Kukjin Kim <kgene@kernel.org>
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:     Maintained
@@ -1652,7 +1653,6 @@ F:        drivers/*/*s3c64xx*
 F:     drivers/*/*s5pv210*
 F:     drivers/memory/samsung/*
 F:     drivers/soc/samsung/*
-F:     drivers/spi/spi-s3c*
 F:     Documentation/arm/Samsung/
 F:     Documentation/devicetree/bindings/arm/samsung/
 F:     Documentation/devicetree/bindings/sram/samsung-sram.txt
@@ -1840,6 +1840,7 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
 ARM/UNIPHIER ARCHITECTURE
 M:     Masahiro Yamada <yamada.masahiro@socionext.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-uniphier.git
 S:     Maintained
 F:     arch/arm/boot/dts/uniphier*
 F:     arch/arm/include/asm/hardware/cache-uniphier.h
@@ -2493,7 +2494,7 @@ F:        include/net/bluetooth/
 BONDING DRIVER
 M:     Jay Vosburgh <j.vosburgh@gmail.com>
 M:     Veaceslav Falico <vfalico@gmail.com>
-M:     Andy Gospodarek <gospo@cumulusnetworks.com>
+M:     Andy Gospodarek <andy@greyhouse.net>
 L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/bonding/
 S:     Supported
@@ -3256,7 +3257,7 @@ F:        kernel/cpuset.c
 CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
 M:     Johannes Weiner <hannes@cmpxchg.org>
 M:     Michal Hocko <mhocko@kernel.org>
-M:     Vladimir Davydov <vdavydov@virtuozzo.com>
+M:     Vladimir Davydov <vdavydov.dev@gmail.com>
 L:     cgroups@vger.kernel.org
 L:     linux-mm@kvack.org
 S:     Maintained
@@ -3277,7 +3278,7 @@ S:        Maintained
 F:     drivers/net/wan/cosa*
 
 CPMAC ETHERNET DRIVER
-M:     Florian Fainelli <florian@openwrt.org>
+M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/ti/cpmac.c
@@ -7473,7 +7474,8 @@ F:        Documentation/devicetree/bindings/sound/max9860.txt
 F:     sound/soc/codecs/max9860.*
 
 MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     drivers/power/max14577_charger.c
@@ -7489,7 +7491,8 @@ F:        include/dt-bindings/*/*max77802.h
 
 MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
 M:     Chanwoo Choi <cw00.choi@samsung.com>
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-kernel@vger.kernel.org
 S:     Supported
 F:     drivers/*/max14577*.c
@@ -9255,7 +9258,7 @@ F:        drivers/pinctrl/sh-pfc/
 
 PIN CONTROLLER - SAMSUNG
 M:     Tomasz Figa <tomasz.figa@gmail.com>
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
@@ -10195,7 +10198,7 @@ S:      Maintained
 F:     drivers/platform/x86/samsung-laptop.c
 
 SAMSUNG AUDIO (ASoC) DRIVERS
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Sangbeom Kim <sbkim73@samsung.com>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -10210,7 +10213,8 @@ F:      drivers/video/fbdev/s3c-fb.c
 
 SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
 M:     Sangbeom Kim <sbkim73@samsung.com>
-M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-kernel@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Supported
@@ -10269,6 +10273,17 @@ S:     Supported
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 F:     drivers/clk/samsung/
 
+SAMSUNG SPI DRIVERS
+M:     Kukjin Kim <kgene@kernel.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Andi Shyti <andi.shyti@samsung.com>
+L:     linux-spi@vger.kernel.org
+L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+S:     Maintained
+F:     Documentation/devicetree/bindings/spi/spi-samsung.txt
+F:     drivers/spi/spi-s3c*
+F:     include/linux/platform_data/spi-s3c64xx.h
+
 SAMSUNG SXGBE DRIVERS
 M:     Byungho An <bh74.an@samsung.com>
 M:     Girish K S <ks.giri@samsung.com>
@@ -11248,12 +11263,8 @@ S:     Odd Fixes
 F:     drivers/staging/vt665?/
 
 STAGING - WILC1000 WIFI DRIVER
-M:     Johnny Kim <johnny.kim@atmel.com>
-M:     Austin Shin <austin.shin@atmel.com>
-M:     Chris Park <chris.park@atmel.com>
-M:     Tony Cho <tony.cho@atmel.com>
-M:     Glen Lee <glen.lee@atmel.com>
-M:     Leo Kim <leo.kim@atmel.com>
+M:     Aditya Shankar <aditya.shankar@microchip.com>
+M:     Ganesh Krishna <ganesh.krishna@microchip.com>
 L:     linux-wireless@vger.kernel.org
 S:     Supported
 F:     drivers/staging/wilc1000/
index 67f42d5..1a8c8dd 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
index e9c9334..fd6e971 100644 (file)
@@ -336,17 +336,6 @@ config HAVE_ARCH_SECCOMP_FILTER
            results in the system call being skipped immediately.
          - seccomp syscall wired up
 
-         For best performance, an arch should use seccomp_phase1 and
-         seccomp_phase2 directly.  It should call seccomp_phase1 for all
-         syscalls if TIF_SECCOMP is set, but seccomp_phase1 does not
-         need to be called from a ptrace-safe context.  It must then
-         call seccomp_phase2 if seccomp_phase1 returns anything other
-         than SECCOMP_PHASE1_OK or SECCOMP_PHASE1_SKIP.
-
-         As an additional optimization, an arch may provide seccomp_data
-         directly to seccomp_phase1; this avoids multiple calls
-         to the syscall_xyz helpers for every syscall.
-
 config SECCOMP_FILTER
        def_bool y
        depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
index c8609d8..b689172 100644 (file)
 
                #address-cells = <1>;
                #size-cells = <1>;
-               elm_id = <&elm>;
+               ti,elm-id = <&elm>;
        };
 };
 
index df63484..e7d9ca1 100644 (file)
 
                #address-cells = <1>;
                #size-cells = <1>;
-               elm_id = <&elm>;
+               ti,elm-id = <&elm>;
 
                /* MTD partition table */
                partition@0 {
index 86f7731..1263c9d 100644 (file)
                gpmc,wr-access-ns = <30>;
                gpmc,wr-data-mux-bus-ns = <0>;
 
-               elm_id = <&elm>;
+               ti,elm-id = <&elm>;
 
                #address-cells = <1>;
                #size-cells = <1>;
index 2e0556a..d3e6bd8 100644 (file)
 
                        port@0 {
                                reg = <0>;
-                               label = "lan1";
+                               label = "lan5";
                        };
 
                        port@1 {
                                reg = <1>;
-                               label = "lan2";
+                               label = "lan4";
                        };
 
                        port@2 {
 
                        port@3 {
                                reg = <3>;
-                               label = "lan4";
+                               label = "lan2";
                        };
 
                        port@4 {
                                reg = <4>;
-                               label = "lan5";
+                               label = "lan1";
                        };
 
                        port@5 {
index d949931..f6d1352 100644 (file)
        samsung,dw-mshc-ciu-div = <3>;
        samsung,dw-mshc-sdr-timing = <0 4>;
        samsung,dw-mshc-ddr-timing = <0 2>;
-       samsung,dw-mshc-hs400-timing = <0 2>;
-       samsung,read-strobe-delay = <90>;
        pinctrl-names = "default";
        pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus1 &sd0_bus4 &sd0_bus8 &sd0_cd>;
        bus-width = <8>;
        cap-mmc-highspeed;
        mmc-hs200-1_8v;
-       mmc-hs400-1_8v;
        vmmc-supply = <&ldo20_reg>;
        vqmmc-supply = <&ldo11_reg>;
 };
index b620ac8..b13b0b2 100644 (file)
                                        clocks = <&clks IMX6QDL_CLK_SPDIF_GCLK>, <&clks IMX6QDL_CLK_OSC>,
                                                 <&clks IMX6QDL_CLK_SPDIF>, <&clks IMX6QDL_CLK_ASRC>,
                                                 <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_ESAI_EXTAL>,
-                                                <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_MLB>,
+                                                <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_DUMMY>,
                                                 <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_SPBA>;
                                        clock-names = "core",  "rxtx0",
                                                      "rxtx1", "rxtx2",
index 96ea936..240a286 100644 (file)
@@ -64,7 +64,7 @@
        cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>;
        no-1-8-v;
        keep-power-in-suspend;
-       enable-sdio-wakup;
+       wakeup-source;
        status = "okay";
 };
 
index 95ee268..2f33c46 100644 (file)
                ti,y-min = /bits/ 16 <0>;
                ti,y-max = /bits/ 16 <0>;
                ti,pressure-max = /bits/ 16 <0>;
-               ti,x-plat-ohms = /bits/ 16 <400>;
+               ti,x-plate-ohms = /bits/ 16 <400>;
                wakeup-source;
        };
 };
index ef84d86..5bf6289 100644 (file)
 
        partition@e0000 {
                label = "u-boot environment";
-               reg = <0xe0000 0x100000>;
+               reg = <0xe0000 0x20000>;
        };
 
        partition@100000 {
index e4ecab1..7175511 100644 (file)
        };
 };
 
+&pciec {
+       status = "okay";
+};
+
 &pcie0 {
        status = "okay";
 };
index 365f39f..0ff1c2d 100644 (file)
        ranges = <0 0 0x00000000 0x1000000>;    /* CS0: 16MB for NAND */
 
        nand@0,0 {
-               linux,mtd-name = "micron,mt29f4g16abbda3w";
+               compatible = "ti,omap2-nand";
                reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
+               interrupt-parent = <&gpmc>;
+               interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
+                            <1 IRQ_TYPE_NONE>; /* termcount */
+               linux,mtd-name = "micron,mt29f4g16abbda3w";
                nand-bus-width = <16>;
                ti,nand-ecc-opt = "bch8";
+               rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
                gpmc,sync-clk-ps = <0>;
                gpmc,cs-on-ns = <0>;
                gpmc,cs-rd-off-ns = <44>;
                gpmc,wr-access-ns = <40>;
                gpmc,wr-data-mux-bus-ns = <0>;
                gpmc,device-width = <2>;
-
-               gpmc,page-burst-access-ns = <5>;
-               gpmc,cycle2cycle-delay-ns = <50>;
-
                #address-cells = <1>;
                #size-cells = <1>;
 
index 5e9a13c..1c2c746 100644 (file)
@@ -46,6 +46,7 @@
                linux,mtd-name = "micron,mt29f4g16abbda3w";
                nand-bus-width = <16>;
                ti,nand-ecc-opt = "bch8";
+               rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
                gpmc,sync-clk-ps = <0>;
                gpmc,cs-on-ns = <0>;
                gpmc,cs-rd-off-ns = <44>;
index de256fa..3e946ca 100644 (file)
 };
 
 &gpmc {
-       ranges = <0 0 0x00000000 0x20000000>;
+       ranges = <0 0 0x30000000 0x1000000>,    /* CS0 */
+                <4 0 0x2b000000 0x1000000>,    /* CS4 */
+                <5 0 0x2c000000 0x1000000>;    /* CS5 */
 
        nand@0,0 {
                compatible = "ti,omap2-nand";
index 7df2792..4f4c6ef 100644 (file)
@@ -55,8 +55,6 @@
 #include "omap-gpmc-smsc9221.dtsi"
 
 &gpmc {
-       ranges = <5 0 0x2c000000 0x1000000>;    /* CS5 */
-
        ethernet@gpmc {
                reg = <5 0 0xff>;
                interrupt-parent = <&gpio6>;
index 9e24b6a..1b304e2 100644 (file)
@@ -27,8 +27,6 @@
 #include "omap-gpmc-smsc9221.dtsi"
 
 &gpmc {
-       ranges = <5 0 0x2c000000 0x1000000>;    /* CS5 */
-
        ethernet@gpmc {
                reg = <5 0 0xff>;
                interrupt-parent = <&gpio6>;
index 334109e..82e98ee 100644 (file)
@@ -15,9 +15,6 @@
 #include "omap-gpmc-smsc9221.dtsi"
 
 &gpmc {
-       ranges = <4 0 0x2b000000 0x1000000>,    /* CS4 */
-                <5 0 0x2c000000 0x1000000>;    /* CS5 */
-
        smsc1: ethernet@gpmc {
                reg = <5 0 0xff>;
                interrupt-parent = <&gpio6>;
index c0ba86c..0d0dae3 100644 (file)
                clock-names = "saradc", "apb_pclk";
                interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
                #io-channel-cells = <1>;
+               resets = <&cru SRST_SARADC>;
+               reset-names = "saradc-apb";
                status = "disabled";
        };
 
index cd33f01..91c4b3c 100644 (file)
                #io-channel-cells = <1>;
                clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
                clock-names = "saradc", "apb_pclk";
+               resets = <&cru SRST_SARADC>;
+               reset-names = "saradc-apb";
                status = "disabled";
        };
 
index 99bbcc2..e2cd683 100644 (file)
                #io-channel-cells = <1>;
                clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
                clock-names = "saradc", "apb_pclk";
+               resets = <&cru SRST_SARADC>;
+               reset-names = "saradc-apb";
                status = "disabled";
        };
 
index e012890..a17ba02 100644 (file)
@@ -84,7 +84,7 @@
                        trips {
                                cpu_alert0: cpu_alert0 {
                                        /* milliCelsius */
-                                       temperature = <850000>;
+                                       temperature = <85000>;
                                        hysteresis = <2000>;
                                        type = "passive";
                                };
index 1dfc492..1444fbd 100644 (file)
                palmas: tps65913@58 {
                        compatible = "ti,palmas";
                        reg = <0x58>;
-                       interrupts = <0 86 IRQ_TYPE_LEVEL_LOW>;
+                       interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
 
                        #interrupt-cells = <2>;
                        interrupt-controller;
index 70cf409..966a7fc 100644 (file)
                palmas: pmic@58 {
                        compatible = "ti,palmas";
                        reg = <0x58>;
-                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>;
+                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 
                        #interrupt-cells = <2>;
                        interrupt-controller;
index 17dd145..a161fa1 100644 (file)
@@ -63,7 +63,7 @@
                palmas: pmic@58 {
                        compatible = "ti,palmas";
                        reg = <0x58>;
-                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>;
+                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 
                        #interrupt-cells = <2>;
                        interrupt-controller;
index 6403e0d..e52b824 100644 (file)
         *   Pin 41: BR_UART1_TXD
         *   Pin 44: BR_UART1_RXD
         */
-       serial@0,70006000 {
+       serial@70006000 {
                compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
                status = "okay";
        };
         *   Pin 71: UART2_CTS_L
         *   Pin 74: UART2_RTS_L
         */
-       serial@0,70006040 {
+       serial@70006040 {
                compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
                status = "okay";
        };
index 0b1e4a9..15d073a 100644 (file)
@@ -142,6 +142,19 @@ ARM_BE8(orr        r7, r7, #(1 << 25))     @ HSCTLR.EE
        and     r7, #0x1f               @ Preserve HPMN
        mcr     p15, 4, r7, c1, c1, 1   @ HDCR
 
+       @ Make sure NS-SVC is initialised appropriately
+       mrc     p15, 0, r7, c1, c0, 0   @ SCTLR
+       orr     r7, #(1 << 5)           @ CP15 barriers enabled
+       bic     r7, #(3 << 7)           @ Clear SED/ITD for v8 (RES0 for v7)
+       bic     r7, #(3 << 19)          @ WXN and UWXN disabled
+       mcr     p15, 0, r7, c1, c0, 0   @ SCTLR
+
+       mrc     p15, 0, r7, c0, c0, 0   @ MIDR
+       mcr     p15, 4, r7, c0, c0, 0   @ VPIDR
+
+       mrc     p15, 0, r7, c0, c0, 5   @ MPIDR
+       mcr     p15, 4, r7, c0, c0, 5   @ VMPIDR
+
 #if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
        @ make CNTP_* and CNTPCT accessible from PL1
        mrc     p15, 0, r7, c0, c1, 1   @ ID_PFR1
index 5d9bfab..6bb7d9c 100644 (file)
@@ -64,6 +64,7 @@ static void __init imx6ul_init_machine(void)
        if (parent == NULL)
                pr_warn("failed to initialize soc device\n");
 
+       of_platform_default_populate(NULL, NULL, parent);
        imx6ul_enet_init();
        imx_anatop_init();
        imx6ul_pm_init();
index 58924b3..fe708e2 100644 (file)
@@ -295,7 +295,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
                val &= ~BM_CLPCR_SBYOS;
                if (cpu_is_imx6sl())
                        val |= BM_CLPCR_BYPASS_PMIC_READY;
-               if (cpu_is_imx6sl() || cpu_is_imx6sx())
+               if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul())
                        val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
                else
                        val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
@@ -310,7 +310,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
                val |= 0x3 << BP_CLPCR_STBY_COUNT;
                val |= BM_CLPCR_VSTBY;
                val |= BM_CLPCR_SBYOS;
-               if (cpu_is_imx6sl())
+               if (cpu_is_imx6sl() || cpu_is_imx6sx())
                        val |= BM_CLPCR_BYPASS_PMIC_READY;
                if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul())
                        val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
index c073fb5..6f2d0ae 100644 (file)
@@ -220,9 +220,6 @@ static int am33xx_cm_wait_module_ready(u8 part, s16 inst, u16 clkctrl_offs,
 {
        int i = 0;
 
-       if (!clkctrl_offs)
-               return 0;
-
        omap_test_timeout(_is_module_ready(inst, clkctrl_offs),
                          MAX_MODULE_READY_TIME, i);
 
@@ -246,9 +243,6 @@ static int am33xx_cm_wait_module_idle(u8 part, s16 inst, u16 clkctrl_offs,
 {
        int i = 0;
 
-       if (!clkctrl_offs)
-               return 0;
-
        omap_test_timeout((_clkctrl_idlest(inst, clkctrl_offs) ==
                                CLKCTRL_IDLEST_DISABLED),
                                MAX_MODULE_READY_TIME, i);
index 2c0e07e..2ab27ad 100644 (file)
@@ -278,9 +278,6 @@ static int omap4_cminst_wait_module_ready(u8 part, s16 inst, u16 clkctrl_offs,
 {
        int i = 0;
 
-       if (!clkctrl_offs)
-               return 0;
-
        omap_test_timeout(_is_module_ready(part, inst, clkctrl_offs),
                          MAX_MODULE_READY_TIME, i);
 
@@ -304,9 +301,6 @@ static int omap4_cminst_wait_module_idle(u8 part, s16 inst, u16 clkctrl_offs,
 {
        int i = 0;
 
-       if (!clkctrl_offs)
-               return 0;
-
        omap_test_timeout((_clkctrl_idlest(part, inst, clkctrl_offs) ==
                           CLKCTRL_IDLEST_DISABLED),
                          MAX_MODULE_DISABLE_TIME, i);
index 5b70938..1052b29 100644 (file)
@@ -1053,6 +1053,10 @@ static int _omap4_wait_target_disable(struct omap_hwmod *oh)
        if (oh->flags & HWMOD_NO_IDLEST)
                return 0;
 
+       if (!oh->prcm.omap4.clkctrl_offs &&
+           !(oh->prcm.omap4.flags & HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET))
+               return 0;
+
        return omap_cm_wait_module_idle(oh->clkdm->prcm_partition,
                                        oh->clkdm->cm_inst,
                                        oh->prcm.omap4.clkctrl_offs, 0);
@@ -2971,6 +2975,10 @@ static int _omap4_wait_target_ready(struct omap_hwmod *oh)
        if (!_find_mpu_rt_port(oh))
                return 0;
 
+       if (!oh->prcm.omap4.clkctrl_offs &&
+           !(oh->prcm.omap4.flags & HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET))
+               return 0;
+
        /* XXX check module SIDLEMODE, hardreset status */
 
        return omap_cm_wait_module_ready(oh->clkdm->prcm_partition,
index 4041bad..7890401 100644 (file)
@@ -443,8 +443,12 @@ struct omap_hwmod_omap2_prcm {
  * HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT: Some IP blocks don't have a PRCM
  *     module-level context loss register associated with them; this
  *     flag bit should be set in those cases
+ * HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET: Some IP blocks have a valid CLKCTRL
+ *     offset of zero; this flag bit should be set in those cases to
+ *     distinguish from hwmods that have no clkctrl offset.
  */
 #define HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT                (1 << 0)
+#define HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET                (1 << 1)
 
 /**
  * struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data
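A note on the two hwmod hunks above: clkctrl_offs == 0 used to double as "no CLKCTRL register", but the AM335x RTC genuinely sits at offset zero, so the new HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET flag disambiguates the two cases. A minimal userspace model of that idiom (names are illustrative, not the kernel's):

/*
 * Userspace model only: distinguishing "offset is genuinely zero" from
 * "no offset recorded" with a flag bit.
 */
#include <stdbool.h>
#include <stdio.h>

#define ZERO_OFFSET_VALID	(1u << 1)	/* illustrative flag bit */

struct prcm_regs {
	unsigned short clkctrl_offs;	/* 0 is ambiguous on its own */
	unsigned char flags;
};

static bool has_clkctrl(const struct prcm_regs *p)
{
	/* A zero offset only counts when the flag marks it as real. */
	return p->clkctrl_offs != 0 || (p->flags & ZERO_OFFSET_VALID);
}

int main(void)
{
	struct prcm_regs no_reg = { 0, 0 };
	struct prcm_regs rtc = { 0, ZERO_OFFSET_VALID };

	printf("no_reg: %d, rtc: %d\n", has_clkctrl(&no_reg), has_clkctrl(&rtc));
	return 0;
}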
index 55c5878..e2d84aa 100644 (file)
@@ -29,6 +29,7 @@
 #define CLKCTRL(oh, clkctrl) ((oh).prcm.omap4.clkctrl_offs = (clkctrl))
 #define RSTCTRL(oh, rstctrl) ((oh).prcm.omap4.rstctrl_offs = (rstctrl))
 #define RSTST(oh, rstst) ((oh).prcm.omap4.rstst_offs = (rstst))
+#define PRCM_FLAGS(oh, flag) ((oh).prcm.omap4.flags = (flag))
 
 /*
  * 'l3' class
@@ -1296,6 +1297,7 @@ static void omap_hwmod_am33xx_clkctrl(void)
        CLKCTRL(am33xx_i2c1_hwmod, AM33XX_CM_WKUP_I2C0_CLKCTRL_OFFSET);
        CLKCTRL(am33xx_wd_timer1_hwmod, AM33XX_CM_WKUP_WDT1_CLKCTRL_OFFSET);
        CLKCTRL(am33xx_rtc_hwmod, AM33XX_CM_RTC_RTC_CLKCTRL_OFFSET);
+       PRCM_FLAGS(am33xx_rtc_hwmod, HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET);
        CLKCTRL(am33xx_mmc2_hwmod, AM33XX_CM_PER_MMC2_CLKCTRL_OFFSET);
        CLKCTRL(am33xx_gpmc_hwmod, AM33XX_CM_PER_GPMC_CLKCTRL_OFFSET);
        CLKCTRL(am33xx_l4_ls_hwmod, AM33XX_CM_PER_L4LS_CLKCTRL_OFFSET);
index d72ee61..1cc4a6f 100644 (file)
@@ -722,8 +722,20 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
  * display serial interface controller
  */
 
+static struct omap_hwmod_class_sysconfig omap3xxx_dsi_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x0010,
+       .syss_offs      = 0x0014,
+       .sysc_flags     = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+                          SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+                          SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
 static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = {
        .name = "dsi",
+       .sysc   = &omap3xxx_dsi_sysc,
 };
 
 static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = {
index cbf53bb..0db4689 100644 (file)
@@ -125,6 +125,8 @@ static unsigned long clk_36864_get_rate(struct clk *clk)
 }
 
 static struct clkops clk_36864_ops = {
+       .enable         = clk_cpu_enable,
+       .disable        = clk_cpu_disable,
        .get_rate       = clk_36864_get_rate,
 };
 
@@ -140,9 +142,8 @@ static struct clk_lookup sa11xx_clkregs[] = {
        CLKDEV_INIT(NULL, "OSTIMER0", &clk_36864),
 };
 
-static int __init sa11xx_clk_init(void)
+int __init sa11xx_clk_init(void)
 {
        clkdev_add_table(sa11xx_clkregs, ARRAY_SIZE(sa11xx_clkregs));
        return 0;
 }
-core_initcall(sa11xx_clk_init);
index 345e63f..3e09bed 100644 (file)
@@ -34,6 +34,7 @@
 
 #include <mach/hardware.h>
 #include <mach/irqs.h>
+#include <mach/reset.h>
 
 #include "generic.h"
 #include <clocksource/pxa.h>
@@ -95,6 +96,8 @@ static void sa1100_power_off(void)
 
 void sa11x0_restart(enum reboot_mode mode, const char *cmd)
 {
+       clear_reset_status(RESET_STATUS_ALL);
+
        if (mode == REBOOT_SOFT) {
                /* Jump into ROM at address 0 */
                soft_restart(0);
@@ -388,6 +391,7 @@ void __init sa1100_init_irq(void)
        sa11x0_init_irq_nodt(IRQ_GPIO0_SC, irq_resource.start);
 
        sa1100_init_gpio();
+       sa11xx_clk_init();
 }
 
 /*
index 0d92e11..68199b6 100644 (file)
@@ -44,3 +44,5 @@ int sa11x0_pm_init(void);
 #else
 static inline int sa11x0_pm_init(void) { return 0; }
 #endif
+
+int sa11xx_clk_init(void);
index a7123b4..d00d52c 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/hwcap.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
+#include <asm/memory.h>
 
 #include "proc-macros.S"
 
index d02a900..4f44d11 100644 (file)
                #io-channel-cells = <1>;
                clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
                clock-names = "saradc", "apb_pclk";
+               resets = <&cru SRST_SARADC>;
+               reset-names = "saradc-apb";
                status = "disabled";
        };
 
index 0a456be..2fee2f5 100644 (file)
@@ -199,19 +199,19 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 #define _percpu_read(pcp)                                              \
 ({                                                                     \
        typeof(pcp) __retval;                                           \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)),      \
                                              sizeof(pcp));             \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
        __retval;                                                       \
 })
 
 #define _percpu_write(pcp, val)                                                \
 do {                                                                   \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val),       \
                                sizeof(pcp));                           \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
 } while(0)                                                             \
 
 #define _pcp_protect(operation, pcp, val)                      \
index e875a5a..89206b5 100644 (file)
@@ -363,4 +363,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 #define arch_read_relax(lock)  cpu_relax()
 #define arch_write_relax(lock) cpu_relax()
 
+/*
+ * Accesses appearing in program order before a spin_lock() operation
+ * can be reordered with accesses inside the critical section, by virtue
+ * of arch_spin_lock being constructed using acquire semantics.
+ *
+ * In cases where this is problematic (e.g. try_to_wake_up), an
+ * smp_mb__before_spinlock() can restore the required ordering.
+ */
+#define smp_mb__before_spinlock()      smp_mb()
+
 #endif /* __ASM_SPINLOCK_H */
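The new comment is the substance of this hunk: arch_spin_lock() only has acquire semantics, so a store issued before taking the lock can be reordered into the critical section, and callers such as try_to_wake_up() need smp_mb__before_spinlock() to restore full ordering. A standalone C11 sketch of the same ordering concern (userspace model, not the kernel primitive):

/* Userspace C11 model of "full fence before an acquire-only lock". */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int flag;			/* store that must be visible first */
static atomic_flag lock = ATOMIC_FLAG_INIT;

static void lock_acquire(void)
{
	/* test-and-set with acquire semantics, like arch_spin_lock() here */
	while (atomic_flag_test_and_set_explicit(&lock, memory_order_acquire))
		;
}

static void publish_then_lock(void)
{
	atomic_store_explicit(&flag, 1, memory_order_relaxed);

	/*
	 * Without this full fence the store above may sink into the
	 * critical section; this line models smp_mb__before_spinlock().
	 */
	atomic_thread_fence(memory_order_seq_cst);

	lock_acquire();
	/* ... critical section ... */
	atomic_flag_clear_explicit(&lock, memory_order_release);
}

int main(void)
{
	publish_then_lock();
	printf("flag=%d\n", atomic_load(&flag));
	return 0;
}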
index 5bb61de..9d37e96 100644 (file)
@@ -100,7 +100,16 @@ ENTRY(cpu_do_resume)
 
        msr     tcr_el1, x8
        msr     vbar_el1, x9
+
+       /*
+        * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
+        * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
+        * exception. Mask them until local_dbg_restore() in cpu_suspend()
+        * resets them.
+        */
+       disable_dbg
        msr     mdscr_el1, x10
+
        msr     sctlr_el1, x12
        /*
         * Restore oslsr_el1 by writing oslar_el1
index 465c709..0472927 100644 (file)
@@ -241,8 +241,7 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
-       if (!__builtin_constant_p(count))
-               check_object_size(from, count, true);
+       check_object_size(from, count, true);
 
        return __copy_user(to, (__force void __user *) from, count);
 }
@@ -250,8 +249,7 @@ __copy_to_user (void __user *to, const void *from, unsigned long count)
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
-       if (!__builtin_constant_p(count))
-               check_object_size(to, count, false);
+       check_object_size(to, count, false);
 
        return __copy_user((__force void __user *) to, from, count);
 }
@@ -265,8 +263,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
        long __cu_len = (n);                                                            \
                                                                                        \
        if (__access_ok(__cu_to, __cu_len, get_fs())) {                                 \
-               if (!__builtin_constant_p(n))                                           \
-                       check_object_size(__cu_from, __cu_len, true);                   \
+               check_object_size(__cu_from, __cu_len, true);                   \
                __cu_len = __copy_user(__cu_to, (__force void __user *)  __cu_from, __cu_len);  \
        }                                                                               \
        __cu_len;                                                                       \
@@ -280,8 +277,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
                                                                                        \
        __chk_user_ptr(__cu_from);                                                      \
        if (__access_ok(__cu_from, __cu_len, get_fs())) {                               \
-               if (!__builtin_constant_p(n))                                           \
-                       check_object_size(__cu_to, __cu_len, false);                    \
+               check_object_size(__cu_to, __cu_len, false);                    \
                __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);   \
        }                                                                               \
        __cu_len;                                                                       \
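This ia64 hunk, and the matching powerpc and sparc ones further down, drop the __builtin_constant_p() guard so check_object_size() also runs when the size is a compile-time constant; the parisc, s390, tile and x86 hunks additionally turn the provably-too-small constant case into a hard __bad_copy_user() build error. A self-contained sketch of that constant-vs-runtime split, using only documented GCC builtins (the kernel version fails the build instead of printing):

/*
 * Sketch of the constant-size vs runtime-size split used by these hunks.
 * The kernel declares __bad_copy_user() with __compiletime_error() so the
 * constant-and-too-small case breaks the build; this model just prints.
 */
#include <stdio.h>
#include <string.h>

#define copy_checked(dst, src, n) do {					\
	size_t __n = (n);						\
	if (sizeof(dst) >= __n)						\
		memcpy((dst), (src), __n);				\
	else if (__builtin_constant_p(n))				\
		fprintf(stderr, "size provably wrong at build time\n");	\
	else								\
		fprintf(stderr, "runtime overflow: %zu < %zu\n",	\
			sizeof(dst), __n);				\
} while (0)

int main(void)
{
	char buf[8];
	const char msg[] = "definitely longer than eight bytes";

	copy_checked(buf, msg, sizeof(msg));	/* constant and too big */
	return 0;
}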
index cd87781..af12c2d 100644 (file)
@@ -1,6 +1,5 @@
 config PARISC
        def_bool y
-       select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_IDE
        select HAVE_OPROFILE
index 1a8f6f9..f6a4c01 100644 (file)
@@ -245,7 +245,6 @@ CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_PROVE_RCU_DELAY=y
 CONFIG_DEBUG_BLOCK_EXT_DEVT=y
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_KEYS=y
 # CONFIG_CRYPTO_HW is not set
 CONFIG_FONTS=y
index 7e07926..c564e6e 100644 (file)
@@ -291,7 +291,6 @@ CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
 CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m
index 0f59fd9..e915048 100644 (file)
@@ -208,13 +208,13 @@ unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned lo
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
-extern void copy_from_user_overflow(void)
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-        __compiletime_error("copy_from_user() buffer size is not provably correct")
-#else
-        __compiletime_warning("copy_from_user() buffer size is not provably correct")
-#endif
-;
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+       WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
 
 static inline unsigned long __must_check copy_from_user(void *to,
                                           const void __user *from,
@@ -223,10 +223,12 @@ static inline unsigned long __must_check copy_from_user(void *to,
         int sz = __compiletime_object_size(to);
         int ret = -EFAULT;
 
-        if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
+        if (likely(sz == -1 || sz >= n))
                 ret = __copy_from_user(to, from, n);
-        else
-                copy_from_user_overflow();
+        else if (!__builtin_constant_p(n))
+               copy_user_overflow(sz, n);
+       else
+                __bad_copy_user();
 
         return ret;
 }
index c1dc6c1..f1e3824 100644 (file)
@@ -311,14 +311,12 @@ static inline unsigned long copy_from_user(void *to,
        unsigned long over;
 
        if (access_ok(VERIFY_READ, from, n)) {
-               if (!__builtin_constant_p(n))
-                       check_object_size(to, n, false);
+               check_object_size(to, n, false);
                return __copy_tofrom_user((__force void __user *)to, from, n);
        }
        if ((unsigned long)from < TASK_SIZE) {
                over = (unsigned long)from + n - TASK_SIZE;
-               if (!__builtin_constant_p(n - over))
-                       check_object_size(to, n - over, false);
+               check_object_size(to, n - over, false);
                return __copy_tofrom_user((__force void __user *)to, from,
                                n - over) + over;
        }
@@ -331,14 +329,12 @@ static inline unsigned long copy_to_user(void __user *to,
        unsigned long over;
 
        if (access_ok(VERIFY_WRITE, to, n)) {
-               if (!__builtin_constant_p(n))
-                       check_object_size(from, n, true);
+               check_object_size(from, n, true);
                return __copy_tofrom_user(to, (__force void __user *)from, n);
        }
        if ((unsigned long)to < TASK_SIZE) {
                over = (unsigned long)to + n - TASK_SIZE;
-               if (!__builtin_constant_p(n))
-                       check_object_size(from, n - over, true);
+               check_object_size(from, n - over, true);
                return __copy_tofrom_user(to, (__force void __user *)from,
                                n - over) + over;
        }
@@ -383,8 +379,7 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
                        return 0;
        }
 
-       if (!__builtin_constant_p(n))
-               check_object_size(to, n, false);
+       check_object_size(to, n, false);
 
        return __copy_tofrom_user((__force void __user *)to, from, n);
 }
@@ -412,8 +407,8 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
                if (ret == 0)
                        return 0;
        }
-       if (!__builtin_constant_p(n))
-               check_object_size(from, n, true);
+
+       check_object_size(from, n, true);
 
        return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
index 0a57fe6..aa8214f 100644 (file)
@@ -127,18 +127,19 @@ _GLOBAL(csum_partial_copy_generic)
        stw     r7,12(r1)
        stw     r8,8(r1)
 
-       rlwinm  r0,r4,3,0x8
-       rlwnm   r6,r6,r0,0,31   /* odd destination address: rotate one byte */
-       cmplwi  cr7,r0,0        /* is destination address even ? */
        addic   r12,r6,0
        addi    r6,r4,-4
        neg     r0,r4
        addi    r4,r3,-4
        andi.   r0,r0,CACHELINE_MASK    /* # bytes to start of cache line */
+       crset   4*cr7+eq
        beq     58f
 
        cmplw   0,r5,r0                 /* is this more than total to do? */
        blt     63f                     /* if not much to do */
+       rlwinm  r7,r6,3,0x8
+       rlwnm   r12,r12,r7,0,31 /* odd destination address: rotate one byte */
+       cmplwi  cr7,r7,0        /* is destination address even ? */
        andi.   r8,r0,3                 /* get it word-aligned first */
        mtctr   r8
        beq+    61f
index dfdb90c..9f19834 100644 (file)
@@ -113,7 +113,12 @@ BEGIN_FTR_SECTION
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
        b       slb_finish_load_1T
 
-0:
+0:     /*
+        * For userspace addresses, make sure this is region 0.
+        */
+       cmpdi   r9, 0
+       bne     8f
+
        /* when using slices, we extract the psize off the slice bitmaps
         * and then we need to get the sllp encoding off the mmu_psize_defs
         * array.
index 1321826..c16d790 100644 (file)
@@ -162,11 +162,12 @@ static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
 static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
 {
        struct pnv_phb *phb = pe->phb;
+       unsigned int pe_num = pe->pe_number;
 
        WARN_ON(pe->pdev);
 
        memset(pe, 0, sizeof(struct pnv_ioda_pe));
-       clear_bit(pe->pe_number, phb->ioda.pe_alloc);
+       clear_bit(pe_num, phb->ioda.pe_alloc);
 }
 
 /* The default M64 BAR is shared by all PEs */
@@ -3402,12 +3403,6 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
        struct pnv_phb *phb = pe->phb;
        struct pnv_ioda_pe *slave, *tmp;
 
-       /* Release slave PEs in compound PE */
-       if (pe->flags & PNV_IODA_PE_MASTER) {
-               list_for_each_entry_safe(slave, tmp, &pe->slaves, list)
-                       pnv_ioda_release_pe(slave);
-       }
-
        list_del(&pe->list);
        switch (phb->type) {
        case PNV_PHB_IODA1:
@@ -3422,6 +3417,15 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
 
        pnv_ioda_release_pe_seg(pe);
        pnv_ioda_deconfigure_pe(pe->phb, pe);
+
+       /* Release slave PEs in the compound PE */
+       if (pe->flags & PNV_IODA_PE_MASTER) {
+               list_for_each_entry_safe(slave, tmp, &pe->slaves, list) {
+                       list_del(&slave->list);
+                       pnv_ioda_free_pe(slave);
+               }
+       }
+
        pnv_ioda_free_pe(pe);
 }
 
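Two fixes in this pnv hunk pair: the PE number is captured into a local before memset() wipes the structure, and slave PEs are now unlinked and freed after the master is torn down rather than released recursively up front. The first problem in miniature (userspace model):

/* Userspace model: read the field before memset() clears it. */
#include <stdio.h>
#include <string.h>

struct pe {
	int pe_number;
	/* ... other bookkeeping that must be zeroed on free ... */
};

static unsigned long pe_alloc_bitmap;

static void free_pe(struct pe *pe)
{
	int pe_num = pe->pe_number;	/* capture before the wipe */

	memset(pe, 0, sizeof(*pe));	/* pe->pe_number is 0 from here on */
	pe_alloc_bitmap &= ~(1UL << pe_num);
}

int main(void)
{
	struct pe pe = { .pe_number = 5 };

	pe_alloc_bitmap = (1UL << 5) | (1UL << 0);
	free_pe(&pe);
	printf("bitmap after free: %#lx\n", pe_alloc_bitmap);	/* 0x1 */
	return 0;
}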
index 4ffcaa6..a39d20e 100644 (file)
@@ -41,7 +41,6 @@
 #include <linux/root_dev.h>
 #include <linux/of.h>
 #include <linux/of_pci.h>
-#include <linux/kexec.h>
 
 #include <asm/mmu.h>
 #include <asm/processor.h>
@@ -66,6 +65,7 @@
 #include <asm/eeh.h>
 #include <asm/reg.h>
 #include <asm/plpar_wrappers.h>
+#include <asm/kexec.h>
 
 #include "pseries.h"
 
index 57d72f1..9114243 100644 (file)
 
 static void icp_opal_teardown_cpu(void)
 {
-       int cpu = smp_processor_id();
+       int hw_cpu = hard_smp_processor_id();
 
        /* Clear any pending IPI */
-       opal_int_set_mfrr(cpu, 0xff);
+       opal_int_set_mfrr(hw_cpu, 0xff);
 }
 
 static void icp_opal_flush_ipi(void)
@@ -101,14 +101,16 @@ static void icp_opal_eoi(struct irq_data *d)
 
 static void icp_opal_cause_ipi(int cpu, unsigned long data)
 {
-       opal_int_set_mfrr(cpu, IPI_PRIORITY);
+       int hw_cpu = get_hard_smp_processor_id(cpu);
+
+       opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
 }
 
 static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 {
-       int cpu = smp_processor_id();
+       int hw_cpu = hard_smp_processor_id();
 
-       opal_int_set_mfrr(cpu, 0xff);
+       opal_int_set_mfrr(hw_cpu, 0xff);
 
        return smp_ipi_demux();
 }
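OPAL wants hardware CPU numbers, so the IPI paths now translate with hard_smp_processor_id()/get_hard_smp_processor_id() instead of passing Linux logical CPU ids straight through. A toy model of why the two numbering spaces must not be mixed (the mapping table and firmware call here are hypothetical):

/* Toy model: Linux logical CPU index vs firmware/hardware CPU id. */
#include <stdio.h>

/* Hypothetical mapping; on real systems it comes from the device tree. */
static const int hw_id_of[] = { 0, 8, 16, 24 };	/* logical cpus 0..3 */

static void fw_set_mfrr(int hw_cpu, unsigned char prio)
{
	printf("firmware call for hw cpu %d, prio %#x\n", hw_cpu, prio);
}

static void cause_ipi(int logical_cpu)
{
	/* Translate first, as get_hard_smp_processor_id() does. */
	fw_set_mfrr(hw_id_of[logical_cpu], 5 /* arbitrary priority */);
}

int main(void)
{
	cause_ipi(2);	/* firmware must see 16, not 2 */
	return 0;
}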
index e751fe2..c109f07 100644 (file)
@@ -68,7 +68,6 @@ config DEBUG_RODATA
 config S390
        def_bool y
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-       select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
        select ARCH_HAS_DEVMEM_IS_ALLOWED
        select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_GCOV_PROFILE_ALL
index 26e0c7f..412b1bd 100644 (file)
@@ -602,7 +602,6 @@ CONFIG_FAIL_FUTEX=y
 CONFIG_FAULT_INJECTION_DEBUG_FS=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_IRQSOFF_TRACER=y
 CONFIG_PREEMPT_TRACER=y
 CONFIG_SCHED_TRACER=y
index 24879da..bec279e 100644 (file)
@@ -552,7 +552,6 @@ CONFIG_NOTIFIER_ERROR_INJECTION=m
 CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
 CONFIG_PM_NOTIFIER_ERROR_INJECT=m
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 # CONFIG_KPROBE_EVENT is not set
 CONFIG_TRACE_ENUM_MAP_FILE=y
index a5c1e5f..1751446 100644 (file)
@@ -549,7 +549,6 @@ CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
index 73610f2..2d40ef0 100644 (file)
@@ -172,7 +172,6 @@ CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_RCU_TRACE=y
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
index 9b49cf1..95aefdb 100644 (file)
@@ -311,6 +311,14 @@ int __get_user_bad(void) __attribute__((noreturn));
 #define __put_user_unaligned __put_user
 #define __get_user_unaligned __get_user
 
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+       WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
 /**
  * copy_to_user: - Copy a block of data into user space.
  * @to:   Destination address, in user space.
@@ -332,12 +340,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
        return __copy_to_user(to, from, n);
 }
 
-void copy_from_user_overflow(void)
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-__compiletime_warning("copy_from_user() buffer size is not provably correct")
-#endif
-;
-
 /**
  * copy_from_user: - Copy a block of data from user space.
  * @to:   Destination address, in kernel space.
@@ -362,7 +364,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 
        might_fault();
        if (unlikely(sz != -1 && sz < n)) {
-               copy_from_user_overflow();
+               if (!__builtin_constant_p(n))
+                       copy_user_overflow(sz, n);
+               else
+                       __bad_copy_user();
                return n;
        }
        return __copy_from_user(to, from, n);
index 341a5a1..e722c51 100644 (file)
@@ -249,8 +249,7 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        if (n && __access_ok((unsigned long) to, n)) {
-               if (!__builtin_constant_p(n))
-                       check_object_size(from, n, true);
+               check_object_size(from, n, true);
                return __copy_user(to, (__force void __user *) from, n);
        } else
                return n;
@@ -258,16 +257,14 @@ static inline unsigned long copy_to_user(void __user *to, const void *from, unsi
 
 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       if (!__builtin_constant_p(n))
-               check_object_size(from, n, true);
+       check_object_size(from, n, true);
        return __copy_user(to, (__force void __user *) from, n);
 }
 
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        if (n && __access_ok((unsigned long) from, n)) {
-               if (!__builtin_constant_p(n))
-                       check_object_size(to, n, false);
+               check_object_size(to, n, false);
                return __copy_user((__force void __user *) to, from, n);
        } else
                return n;
index 8bda94f..37a315d 100644 (file)
@@ -212,8 +212,7 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
 {
        unsigned long ret;
 
-       if (!__builtin_constant_p(size))
-               check_object_size(to, size, false);
+       check_object_size(to, size, false);
 
        ret = ___copy_from_user(to, from, size);
        if (unlikely(ret))
@@ -233,8 +232,8 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
 {
        unsigned long ret;
 
-       if (!__builtin_constant_p(size))
-               check_object_size(from, size, true);
+       check_object_size(from, size, true);
+
        ret = ___copy_to_user(to, from, size);
        if (unlikely(ret))
                ret = copy_to_user_fixup(to, from, size);
index 4820a02..78da75b 100644 (file)
@@ -4,7 +4,6 @@
 config TILE
        def_bool y
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-       select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
        select ARCH_HAS_DEVMEM_IS_ALLOWED
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select ARCH_WANT_FRAME_POINTERS
index 0a9c426..a77369e 100644 (file)
@@ -416,14 +416,13 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
        return n;
 }
 
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-/*
- * There are still unprovable places in the generic code as of 2.6.34, so this
- * option is not really compatible with -Werror, which is more useful in
- * general.
- */
-extern void copy_from_user_overflow(void)
-       __compiletime_warning("copy_from_user() size is not provably correct");
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+       WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
 
 static inline unsigned long __must_check copy_from_user(void *to,
                                          const void __user *from,
@@ -433,14 +432,13 @@ static inline unsigned long __must_check copy_from_user(void *to,
 
        if (likely(sz == -1 || sz >= n))
                n = _copy_from_user(to, from, n);
+       else if (!__builtin_constant_p(n))
+               copy_user_overflow(sz, n);
        else
-               copy_from_user_overflow();
+               __bad_copy_user();
 
        return n;
 }
-#else
-#define copy_from_user _copy_from_user
-#endif
 
 #ifdef __tilegx__
 /**
index ef4b8f9..b783ac8 100644 (file)
@@ -21,21 +21,17 @@ void handle_syscall(struct uml_pt_regs *r)
        PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
 
        if (syscall_trace_enter(regs))
-               return;
+               goto out;
 
        /* Do the seccomp check after ptrace; failures should be fast. */
        if (secure_computing(NULL) == -1)
-               return;
+               goto out;
 
-       /* Update the syscall number after orig_ax has potentially been updated
-        * with ptrace.
-        */
-       UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
        syscall = UPT_SYSCALL_NR(r);
-
        if (syscall >= 0 && syscall <= __NR_syscall_max)
                PT_REGS_SET_SYSCALL_RETURN(regs,
                                EXECUTE_SYSCALL(syscall, regs));
 
+out:
        syscall_trace_leave(regs);
 }
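Returning early here skipped syscall_trace_leave(), leaving ptrace/audit unbalanced; both early exits now funnel through a single label so the leave hook always runs. The single-exit pattern on its own:

/* Single-exit pattern: the leave hook runs on every path. */
#include <stdio.h>

static int trace_enter(void) { puts("enter"); return 1; /* tracer aborted */ }
static void trace_leave(void) { puts("leave"); }

static void handle(void)
{
	if (trace_enter())
		goto out;	/* a bare return here would skip trace_leave() */

	puts("run the syscall");
out:
	trace_leave();
}

int main(void)
{
	handle();
	return 0;
}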
index c580d8c..2a1f0ce 100644 (file)
@@ -24,7 +24,6 @@ config X86
        select ARCH_DISCARD_MEMBLOCK
        select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-       select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
        select ARCH_HAS_DEVMEM_IS_ALLOWED
        select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_FAST_MULTIPLIER
index 4e2ecfa..4b429df 100644 (file)
@@ -1 +1,3 @@
 CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+# CONFIG_HIGHMEM64G is not set
index a0ae610..e3af86f 100644 (file)
@@ -697,44 +697,15 @@ unsigned long __must_check _copy_from_user(void *to, const void __user *from,
 unsigned long __must_check _copy_to_user(void __user *to, const void *from,
                                         unsigned n);
 
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-# define copy_user_diag __compiletime_error
-#else
-# define copy_user_diag __compiletime_warning
-#endif
-
-extern void copy_user_diag("copy_from_user() buffer size is too small")
-copy_from_user_overflow(void);
-extern void copy_user_diag("copy_to_user() buffer size is too small")
-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
-
-#undef copy_user_diag
-
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-
-extern void
-__compiletime_warning("copy_from_user() buffer size is not provably correct")
-__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
-#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
-
-extern void
-__compiletime_warning("copy_to_user() buffer size is not provably correct")
-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
-#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
-
-#else
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
 
-static inline void
-__copy_from_user_overflow(int size, unsigned long count)
+static inline void copy_user_overflow(int size, unsigned long count)
 {
        WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
 }
 
-#define __copy_to_user_overflow __copy_from_user_overflow
-
-#endif
-
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        int sz = __compiletime_object_size(to);
@@ -743,36 +714,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 
        kasan_check_write(to, n);
 
-       /*
-        * While we would like to have the compiler do the checking for us
-        * even in the non-constant size case, any false positives there are
-        * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
-        * without - the [hopefully] dangerous looking nature of the warning
-        * would make people go look at the respecitive call sites over and
-        * over again just to find that there's no problem).
-        *
-        * And there are cases where it's just not realistic for the compiler
-        * to prove the count to be in range. For example when multiple call
-        * sites of a helper function - perhaps in different source files -
-        * all doing proper range checking, yet the helper function not doing
-        * so again.
-        *
-        * Therefore limit the compile time checking to the constant size
-        * case, and do only runtime checking for non-constant sizes.
-        */
-
        if (likely(sz < 0 || sz >= n)) {
                check_object_size(to, n, false);
                n = _copy_from_user(to, from, n);
-       } else if (__builtin_constant_p(n))
-               copy_from_user_overflow();
+       } else if (!__builtin_constant_p(n))
+               copy_user_overflow(sz, n);
        else
-               __copy_from_user_overflow(sz, n);
+               __bad_copy_user();
 
        return n;
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        int sz = __compiletime_object_size(from);
@@ -781,21 +734,17 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 
        might_fault();
 
-       /* See the comment in copy_from_user() above. */
        if (likely(sz < 0 || sz >= n)) {
                check_object_size(from, n, true);
                n = _copy_to_user(to, from, n);
-       } else if (__builtin_constant_p(n))
-               copy_to_user_overflow();
+       } else if (!__builtin_constant_p(n))
+               copy_user_overflow(sz, n);
        else
-               __copy_to_user_overflow(sz, n);
+               __bad_copy_user();
 
        return n;
 }
 
-#undef __copy_from_user_overflow
-#undef __copy_to_user_overflow
-
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
index f5c69d8..b81fe2d 100644 (file)
@@ -669,6 +669,17 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
                set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
 }
 
+#define MSR_AMD64_DE_CFG       0xC0011029
+
+static void init_amd_ln(struct cpuinfo_x86 *c)
+{
+       /*
+        * Apply erratum 665 fix unconditionally so machines without a BIOS
+        * fix work.
+        */
+       msr_set_bit(MSR_AMD64_DE_CFG, 31);
+}
+
 static void init_amd_bd(struct cpuinfo_x86 *c)
 {
        u64 value;
@@ -726,6 +737,7 @@ static void init_amd(struct cpuinfo_x86 *c)
        case 6:    init_amd_k7(c); break;
        case 0xf:  init_amd_k8(c); break;
        case 0x10: init_amd_gh(c); break;
+       case 0x12: init_amd_ln(c); break;
        case 0x15: init_amd_bd(c); break;
        }
 
index ad5bc95..1acfd76 100644 (file)
@@ -56,12 +56,12 @@ asm (".pushsection .entry.text, \"ax\"\n"
      ".popsection");
 
 /* identity function, which can be inlined */
-u32 _paravirt_ident_32(u32 x)
+u32 notrace _paravirt_ident_32(u32 x)
 {
        return x;
 }
 
-u64 _paravirt_ident_64(u64 x)
+u64 notrace _paravirt_ident_64(u64 x)
 {
        return x;
 }
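Marking the paravirt identity helpers notrace keeps the function tracer out of them; they can be reached from tracing paths themselves, so instrumenting them risks recursion. As a rough userspace analogue (assuming notrace maps to GCC's no_instrument_function attribute, and building with -finstrument-functions):

/* Build with: gcc -finstrument-functions sketch.c */
#include <stdio.h>

void __attribute__((no_instrument_function))
__cyg_profile_func_enter(void *fn, void *site)
{
	(void)site;
	fprintf(stderr, "enter %p\n", fn);
}

void __attribute__((no_instrument_function))
__cyg_profile_func_exit(void *fn, void *site)
{
	(void)fn;
	(void)site;
}

/* Excluded from instrumentation, like the notrace identity helpers. */
static unsigned int __attribute__((no_instrument_function))
ident_32(unsigned int x)
{
	return x;
}

int main(void)
{
	return (int)ident_32(0);
}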
index ecb1b69..170cc4f 100644 (file)
@@ -927,9 +927,10 @@ int track_pfn_copy(struct vm_area_struct *vma)
 }
 
 /*
- * prot is passed in as a parameter for the new mapping. If the vma has a
- * linear pfn mapping for the entire range reserve the entire vma range with
- * single reserve_pfn_range call.
+ * prot is passed in as a parameter for the new mapping. If the vma has
+ * a linear pfn mapping for the entire range, or no vma is provided,
+ * reserve the entire pfn + size range with single reserve_pfn_range
+ * call.
  */
 int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                    unsigned long pfn, unsigned long addr, unsigned long size)
@@ -938,11 +939,12 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
        enum page_cache_mode pcm;
 
        /* reserve the whole chunk starting from paddr */
-       if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
+       if (!vma || (addr == vma->vm_start
+                               && size == (vma->vm_end - vma->vm_start))) {
                int ret;
 
                ret = reserve_pfn_range(paddr, size, prot, 0);
-               if (!ret)
+               if (ret == 0 && vma)
                        vma->vm_flags |= VM_PAT;
                return ret;
        }
@@ -997,7 +999,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
        resource_size_t paddr;
        unsigned long prot;
 
-       if (!(vma->vm_flags & VM_PAT))
+       if (vma && !(vma->vm_flags & VM_PAT))
                return;
 
        /* free the chunk starting from pfn or the whole chunk */
@@ -1011,7 +1013,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
                size = vma->vm_end - vma->vm_start;
        }
        free_pfn_range(paddr, size);
-       vma->vm_flags &= ~VM_PAT;
+       if (vma)
+               vma->vm_flags &= ~VM_PAT;
 }
 
 /*
index ebd4dd6..a7ef7b1 100644 (file)
@@ -84,7 +84,10 @@ int putreg(struct task_struct *child, int regno, unsigned long value)
        case EAX:
        case EIP:
        case UESP:
+               break;
        case ORIG_EAX:
+               /* Update the syscall number. */
+               UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
                break;
        case FS:
                if (value && (value & 3) != 3)
index faab418..0b5c184 100644 (file)
@@ -78,7 +78,11 @@ int putreg(struct task_struct *child, int regno, unsigned long value)
        case RSI:
        case RDI:
        case RBP:
+               break;
+
        case ORIG_RAX:
+               /* Update the syscall number. */
+               UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
                break;
 
        case FS:
index cf8037a..77207b4 100644 (file)
@@ -733,13 +733,14 @@ static void cryptd_aead_crypt(struct aead_request *req,
        rctx = aead_request_ctx(req);
        compl = rctx->complete;
 
+       tfm = crypto_aead_reqtfm(req);
+
        if (unlikely(err == -EINPROGRESS))
                goto out;
        aead_request_set_tfm(req, child);
        err = crypt( req );
 
 out:
-       tfm = crypto_aead_reqtfm(req);
        ctx = crypto_aead_ctx(tfm);
        refcnt = atomic_read(&ctx->refcnt);
 
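crypt() may complete and free the request, so the transform is now read from the request before the call instead of afterwards. The same capture-before-use rule in a tiny userspace model:

/* Userspace model: capture what you need before a call that may free it. */
#include <stdio.h>
#include <stdlib.h>

struct request {
	int tfm_id;
};

static void do_crypt(struct request *req)
{
	/* May complete and free the request, as cryptd's crypt() can. */
	free(req);
}

static void handle(struct request *req)
{
	int tfm_id = req->tfm_id;	/* read before do_crypt() */

	do_crypt(req);			/* req may be gone now */

	printf("finishing on tfm %d\n", tfm_id);
}

int main(void)
{
	struct request *req = malloc(sizeof(*req));

	if (!req)
		return 1;
	req->tfm_id = 7;
	handle(req);
	return 0;
}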
index 4c745bf..161f915 100644 (file)
@@ -42,7 +42,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
                list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
                        struct acpi_nfit_system_address *spa = nfit_spa->spa;
 
-                       if (nfit_spa_type(spa) == NFIT_SPA_PM)
+                       if (nfit_spa_type(spa) != NFIT_SPA_PM)
                                continue;
                        /* find the spa that covers the mce addr */
                        if (spa->address > mce->addr)
index ad9fc84..e878fc7 100644 (file)
@@ -2054,7 +2054,7 @@ int __init acpi_scan_init(void)
 
 static struct acpi_probe_entry *ape;
 static int acpi_probe_count;
-static DEFINE_SPINLOCK(acpi_probe_lock);
+static DEFINE_MUTEX(acpi_probe_mutex);
 
 static int __init acpi_match_madt(struct acpi_subtable_header *header,
                                  const unsigned long end)
@@ -2073,7 +2073,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
        if (acpi_disabled)
                return 0;
 
-       spin_lock(&acpi_probe_lock);
+       mutex_lock(&acpi_probe_mutex);
        for (ape = ap_head; nr; ape++, nr--) {
                if (ACPI_COMPARE_NAME(ACPI_SIG_MADT, ape->id)) {
                        acpi_probe_count = 0;
@@ -2086,7 +2086,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
                                count++;
                }
        }
-       spin_unlock(&acpi_probe_lock);
+       mutex_unlock(&acpi_probe_mutex);
 
        return count;
 }
index 7461a58..dcf2c72 100644 (file)
@@ -2524,7 +2524,7 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host,
 
                /* Do not receive interrupts sent by dummy ports */
                if (!pp) {
-                       disable_irq(irq + i);
+                       disable_irq(irq);
                        continue;
                }
 
index 633aa29..44f97ad 100644 (file)
@@ -144,7 +144,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
        ap->ioaddr.altstatus_addr = base + 0x1E;
        ap->ioaddr.bmdma_addr = base;
        ata_sff_std_ports(&ap->ioaddr);
-       ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
+       ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
 
        ninja32_program(base);
        /* FIXME: Should we disable them at remove ? */
index 9d8807e..b275676 100644 (file)
@@ -1885,9 +1885,9 @@ static int open_tx(struct atm_vcc *vcc)
                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
                     return ret;
                 }
-       } 
-       else  
-           printk("iadev:  Non UBR, ABR and CBR traffic not supportedn"); 
+       } else {
+               printk("iadev:  Non UBR, ABR and CBR traffic not supported\n");
+       }
         
         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
        IF_EVENT(printk("ia open_tx returning \n");)  
index e097d35..17995fa 100644 (file)
@@ -301,7 +301,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
        int (*callback)(struct device *);
        int retval;
 
-       trace_rpm_idle(dev, rpmflags);
+       trace_rpm_idle_rcuidle(dev, rpmflags);
        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */
@@ -337,7 +337,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
-               trace_rpm_return_int(dev, _THIS_IP_, 0);
+               trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
                return 0;
        }
 
@@ -352,7 +352,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
        wake_up_all(&dev->power.wait_queue);
 
  out:
-       trace_rpm_return_int(dev, _THIS_IP_, retval);
+       trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
        return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
 }
 
@@ -601,7 +601,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
        struct device *parent = NULL;
        int retval = 0;
 
-       trace_rpm_resume(dev, rpmflags);
+       trace_rpm_resume_rcuidle(dev, rpmflags);
 
  repeat:
        if (dev->power.runtime_error)
@@ -764,7 +764,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
                spin_lock_irq(&dev->power.lock);
        }
 
-       trace_rpm_return_int(dev, _THIS_IP_, retval);
+       trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
 
        return retval;
 }
index aa56af8..b11af3f 100644 (file)
@@ -404,6 +404,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                unsigned int new_base_reg, new_top_reg;
                unsigned int min, max;
                unsigned int max_dist;
+               unsigned int dist, best_dist = UINT_MAX;
 
                max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
                        map->cache_word_size;
@@ -423,24 +424,41 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                &base_reg, &top_reg);
 
                        if (base_reg <= max && top_reg >= min) {
-                               new_base_reg = min(reg, base_reg);
-                               new_top_reg = max(reg, top_reg);
-                       } else {
-                               if (max < base_reg)
-                                       node = node->rb_left;
+                               if (reg < base_reg)
+                                       dist = base_reg - reg;
+                               else if (reg > top_reg)
+                                       dist = reg - top_reg;
                                else
-                                       node = node->rb_right;
-
-                               continue;
+                                       dist = 0;
+                               if (dist < best_dist) {
+                                       rbnode = rbnode_tmp;
+                                       best_dist = dist;
+                                       new_base_reg = min(reg, base_reg);
+                                       new_top_reg = max(reg, top_reg);
+                               }
                        }
 
-                       ret = regcache_rbtree_insert_to_block(map, rbnode_tmp,
+                       /*
+                        * Keep looking, we want to choose the closest block,
+                        * otherwise we might end up creating overlapping
+                        * blocks, which breaks the rbtree.
+                        */
+                       if (reg < base_reg)
+                               node = node->rb_left;
+                       else if (reg > top_reg)
+                               node = node->rb_right;
+                       else
+                               break;
+               }
+
+               if (rbnode) {
+                       ret = regcache_rbtree_insert_to_block(map, rbnode,
                                                              new_base_reg,
                                                              new_top_reg, reg,
                                                              value);
                        if (ret)
                                return ret;
-                       rbtree_ctx->cached_rbnode = rbnode_tmp;
+                       rbtree_ctx->cached_rbnode = rbnode;
                        return 0;
                }
 
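Rather than merging the register into the first adjacent rbtree block it finds, the walk now keeps going and remembers the block with the smallest distance to the new register, which avoids creating overlapping blocks that would break the rbtree. The distance selection in isolation (userspace sketch):

/* Userspace sketch: pick the existing [base, top] block closest to reg. */
#include <limits.h>
#include <stdio.h>

struct block { unsigned int base, top; };

static unsigned int dist_to(const struct block *b, unsigned int reg)
{
	if (reg < b->base)
		return b->base - reg;
	if (reg > b->top)
		return reg - b->top;
	return 0;			/* reg already falls inside */
}

int main(void)
{
	struct block blocks[] = { { 0, 7 }, { 16, 23 }, { 40, 47 } };
	unsigned int reg = 26, best = UINT_MAX;
	int best_idx = -1, i;

	for (i = 0; i < 3; i++) {
		unsigned int d = dist_to(&blocks[i], reg);

		if (d < best) {		/* keep only the closest block */
			best = d;
			best_idx = i;
		}
	}
	printf("register %u joins block %d (distance %u)\n",
	       reg, best_idx, best);
	return 0;
}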
index df7ff72..4e58256 100644 (file)
@@ -38,10 +38,11 @@ static int regcache_hw_init(struct regmap *map)
 
        /* calculate the size of reg_defaults */
        for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
-               if (!regmap_volatile(map, i * map->reg_stride))
+               if (regmap_readable(map, i * map->reg_stride) &&
+                   !regmap_volatile(map, i * map->reg_stride))
                        count++;
 
-       /* all registers are volatile, so just bypass */
+       /* all registers are unreadable or volatile, so just bypass */
        if (!count) {
                map->cache_bypass = true;
                return 0;
index 51fa7d6..25d26bb 100644 (file)
@@ -1474,6 +1474,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
                ret = map->bus->write(map->bus_context, buf, len);
 
                kfree(buf);
+       } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
+               regcache_drop_region(map, reg, reg + 1);
        }
 
        trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
index 921ce18..b4f6520 100644 (file)
@@ -36,12 +36,31 @@ u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc)
 }
 EXPORT_SYMBOL_GPL(bcma_chipco_get_alp_clock);
 
+static bool bcma_core_cc_has_pmu_watchdog(struct bcma_drv_cc *cc)
+{
+       struct bcma_bus *bus = cc->core->bus;
+
+       if (cc->capabilities & BCMA_CC_CAP_PMU) {
+               if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53573) {
+                       WARN(bus->chipinfo.rev <= 1, "No watchdog available\n");
+                       /* 53573B0 and 53573B1 have bugged PMU watchdog. It can
+                        * be enabled but timer can't be bumped. Use CC one
+                        * instead.
+                        */
+                       return false;
+               }
+               return true;
+       } else {
+               return false;
+       }
+}
+
 static u32 bcma_chipco_watchdog_get_max_timer(struct bcma_drv_cc *cc)
 {
        struct bcma_bus *bus = cc->core->bus;
        u32 nb;
 
-       if (cc->capabilities & BCMA_CC_CAP_PMU) {
+       if (bcma_core_cc_has_pmu_watchdog(cc)) {
                if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
                        nb = 32;
                else if (cc->core->id.rev < 26)
@@ -95,9 +114,16 @@ static int bcma_chipco_watchdog_ticks_per_ms(struct bcma_drv_cc *cc)
 
 int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
 {
+       struct bcma_bus *bus = cc->core->bus;
        struct bcm47xx_wdt wdt = {};
        struct platform_device *pdev;
 
+       if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53573 &&
+           bus->chipinfo.rev <= 1) {
+               pr_debug("No watchdog on 53573A0 / 53573A1\n");
+               return 0;
+       }
+
        wdt.driver_data = cc;
        wdt.timer_set = bcma_chipco_watchdog_timer_set_wdt;
        wdt.timer_set_ms = bcma_chipco_watchdog_timer_set_ms_wdt;
@@ -105,7 +131,7 @@ int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
                bcma_chipco_watchdog_get_max_timer(cc) / cc->ticks_per_ms;
 
        pdev = platform_device_register_data(NULL, "bcm47xx-wdt",
-                                            cc->core->bus->num, &wdt,
+                                            bus->num, &wdt,
                                             sizeof(wdt));
        if (IS_ERR(pdev))
                return PTR_ERR(pdev);
@@ -217,7 +243,7 @@ u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
        u32 maxt;
 
        maxt = bcma_chipco_watchdog_get_max_timer(cc);
-       if (cc->capabilities & BCMA_CC_CAP_PMU) {
+       if (bcma_core_cc_has_pmu_watchdog(cc)) {
                if (ticks == 1)
                        ticks = 2;
                else if (ticks > maxt)
index 1f63547..2c1798e 100644 (file)
@@ -209,6 +209,8 @@ static void bcma_of_fill_device(struct platform_device *parent,
                core->dev.of_node = node;
 
        core->irq = bcma_of_get_irq(parent, core, 0);
+
+       of_dma_configure(&core->dev, node);
 }
 
 unsigned int bcma_core_irq(struct bcma_device *core, int num)
@@ -248,12 +250,12 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
                core->irq = bus->host_pci->irq;
                break;
        case BCMA_HOSTTYPE_SOC:
-               core->dev.dma_mask = &core->dev.coherent_dma_mask;
-               if (bus->host_pdev) {
+               if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) {
                        core->dma_dev = &bus->host_pdev->dev;
                        core->dev.parent = &bus->host_pdev->dev;
                        bcma_of_fill_device(bus->host_pdev, core);
                } else {
+                       core->dev.dma_mask = &core->dev.coherent_dma_mask;
                        core->dma_dev = &core->dev;
                }
                break;
index 5755907..ffa7c9d 100644 (file)
@@ -551,7 +551,7 @@ static struct attribute *cci5xx_pmu_event_attrs[] = {
        CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
        CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
        CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
-       CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full, 0xE),
+       CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE),
        CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
        NULL
 };
index 97a9185..884c030 100644 (file)
@@ -187,6 +187,7 @@ struct arm_ccn {
        struct arm_ccn_component *xp;
 
        struct arm_ccn_dt dt;
+       int mn_id;
 };
 
 static DEFINE_MUTEX(arm_ccn_mutex);
@@ -212,6 +213,7 @@ static int arm_ccn_node_to_xp_port(int node)
 #define CCN_CONFIG_TYPE(_config)       (((_config) >> 8) & 0xff)
 #define CCN_CONFIG_EVENT(_config)      (((_config) >> 16) & 0xff)
 #define CCN_CONFIG_PORT(_config)       (((_config) >> 24) & 0x3)
+#define CCN_CONFIG_BUS(_config)                (((_config) >> 24) & 0x3)
 #define CCN_CONFIG_VC(_config)         (((_config) >> 26) & 0x7)
 #define CCN_CONFIG_DIR(_config)                (((_config) >> 29) & 0x1)
 #define CCN_CONFIG_MASK(_config)       (((_config) >> 30) & 0xf)
@@ -241,6 +243,7 @@ static CCN_FORMAT_ATTR(xp, "config:0-7");
 static CCN_FORMAT_ATTR(type, "config:8-15");
 static CCN_FORMAT_ATTR(event, "config:16-23");
 static CCN_FORMAT_ATTR(port, "config:24-25");
+static CCN_FORMAT_ATTR(bus, "config:24-25");
 static CCN_FORMAT_ATTR(vc, "config:26-28");
 static CCN_FORMAT_ATTR(dir, "config:29-29");
 static CCN_FORMAT_ATTR(mask, "config:30-33");
@@ -253,6 +256,7 @@ static struct attribute *arm_ccn_pmu_format_attrs[] = {
        &arm_ccn_pmu_format_attr_type.attr.attr,
        &arm_ccn_pmu_format_attr_event.attr.attr,
        &arm_ccn_pmu_format_attr_port.attr.attr,
+       &arm_ccn_pmu_format_attr_bus.attr.attr,
        &arm_ccn_pmu_format_attr_vc.attr.attr,
        &arm_ccn_pmu_format_attr_dir.attr.attr,
        &arm_ccn_pmu_format_attr_mask.attr.attr,
@@ -328,6 +332,7 @@ struct arm_ccn_pmu_event {
 static ssize_t arm_ccn_pmu_event_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
+       struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
        struct arm_ccn_pmu_event *event = container_of(attr,
                        struct arm_ccn_pmu_event, attr);
        ssize_t res;
@@ -349,10 +354,17 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
                break;
        case CCN_TYPE_XP:
                res += snprintf(buf + res, PAGE_SIZE - res,
-                               ",xp=?,port=?,vc=?,dir=?");
+                               ",xp=?,vc=?");
                if (event->event == CCN_EVENT_WATCHPOINT)
                        res += snprintf(buf + res, PAGE_SIZE - res,
-                                       ",cmp_l=?,cmp_h=?,mask=?");
+                                       ",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?");
+               else
+                       res += snprintf(buf + res, PAGE_SIZE - res,
+                                       ",bus=?");
+
+               break;
+       case CCN_TYPE_MN:
+               res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id);
                break;
        default:
                res += snprintf(buf + res, PAGE_SIZE - res, ",node=?");
@@ -383,9 +395,9 @@ static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
 }
 
 static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
-       CCN_EVENT_MN(eobarrier, "dir=0,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
-       CCN_EVENT_MN(ecbarrier, "dir=0,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
-       CCN_EVENT_MN(dvmop, "dir=0,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
+       CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
+       CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
+       CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
        CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
        CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
        CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
@@ -733,9 +745,10 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
 
        if (has_branch_stack(event) || event->attr.exclude_user ||
                        event->attr.exclude_kernel || event->attr.exclude_hv ||
-                       event->attr.exclude_idle) {
+                       event->attr.exclude_idle || event->attr.exclude_host ||
+                       event->attr.exclude_guest) {
                dev_warn(ccn->dev, "Can't exclude execution levels!\n");
-               return -EOPNOTSUPP;
+               return -EINVAL;
        }
 
        if (event->cpu < 0) {
@@ -759,6 +772,12 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
 
        /* Validate node/xp vs topology */
        switch (type) {
+       case CCN_TYPE_MN:
+               if (node_xp != ccn->mn_id) {
+                       dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp);
+                       return -EINVAL;
+               }
+               break;
        case CCN_TYPE_XP:
                if (node_xp >= ccn->num_xps) {
                        dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
@@ -886,6 +905,10 @@ static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
        struct arm_ccn_component *xp;
        u32 val, dt_cfg;
 
+       /* Nothing to do for cycle counter */
+       if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
+               return;
+
        if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
                xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
        else
@@ -917,38 +940,17 @@ static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
                        arm_ccn_pmu_read_counter(ccn, hw->idx));
        hw->state = 0;
 
-       /*
-        * Pin the timer, so that the overflows are handled by the chosen
-        * event->cpu (this is the same one as presented in "cpumask"
-        * attribute).
-        */
-       if (!ccn->irq)
-               hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
-                               HRTIMER_MODE_REL_PINNED);
-
        /* Set the DT bus input, engaging the counter */
        arm_ccn_pmu_xp_dt_config(event, 1);
 }
 
 static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
 {
-       struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
        struct hw_perf_event *hw = &event->hw;
-       u64 timeout;
 
        /* Disable counting, setting the DT bus to pass-through mode */
        arm_ccn_pmu_xp_dt_config(event, 0);
 
-       if (!ccn->irq)
-               hrtimer_cancel(&ccn->dt.hrtimer);
-
-       /* Let the DT bus drain */
-       timeout = arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) +
-                       ccn->num_xps;
-       while (arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) <
-                       timeout)
-               cpu_relax();
-
        if (flags & PERF_EF_UPDATE)
                arm_ccn_pmu_event_update(event);
 
@@ -988,7 +990,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
 
        /* Comparison values */
        writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
-       writel((cmp_l >> 32) & 0xefffffff,
+       writel((cmp_l >> 32) & 0x7fffffff,
                        source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
        writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
        writel((cmp_h >> 32) & 0x0fffffff,
@@ -996,7 +998,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
 
        /* Mask */
        writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
-       writel((mask_l >> 32) & 0xefffffff,
+       writel((mask_l >> 32) & 0x7fffffff,
                        source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
        writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
        writel((mask_h >> 32) & 0x0fffffff,
@@ -1014,7 +1016,7 @@ static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
        hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);
 
        id = (CCN_CONFIG_VC(event->attr.config) << 4) |
-                       (CCN_CONFIG_PORT(event->attr.config) << 3) |
+                       (CCN_CONFIG_BUS(event->attr.config) << 3) |
                        (CCN_CONFIG_EVENT(event->attr.config) << 0);
 
        val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
@@ -1099,15 +1101,31 @@ static void arm_ccn_pmu_event_config(struct perf_event *event)
        spin_unlock(&ccn->dt.config_lock);
 }
 
+static int arm_ccn_pmu_active_counters(struct arm_ccn *ccn)
+{
+       return bitmap_weight(ccn->dt.pmu_counters_mask,
+                            CCN_NUM_PMU_EVENT_COUNTERS + 1);
+}
+
 static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
 {
        int err;
        struct hw_perf_event *hw = &event->hw;
+       struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
 
        err = arm_ccn_pmu_event_alloc(event);
        if (err)
                return err;
 
+       /*
+        * Pin the timer, so that the overflows are handled by the chosen
+        * event->cpu (this is the same one as presented in "cpumask"
+        * attribute).
+        */
+       if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 1)
+               hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
+                             HRTIMER_MODE_REL_PINNED);
+
        arm_ccn_pmu_event_config(event);
 
        hw->state = PERF_HES_STOPPED;
@@ -1120,9 +1138,14 @@ static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
 
 static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
 {
+       struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+
        arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
 
        arm_ccn_pmu_event_release(event);
+
+       if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 0)
+               hrtimer_cancel(&ccn->dt.hrtimer);
 }
 
 static void arm_ccn_pmu_event_read(struct perf_event *event)
@@ -1130,6 +1153,24 @@ static void arm_ccn_pmu_event_read(struct perf_event *event)
        arm_ccn_pmu_event_update(event);
 }
 
+static void arm_ccn_pmu_enable(struct pmu *pmu)
+{
+       struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
+
+       u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
+       val |= CCN_DT_PMCR__PMU_EN;
+       writel(val, ccn->dt.base + CCN_DT_PMCR);
+}
+
+static void arm_ccn_pmu_disable(struct pmu *pmu)
+{
+       struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
+
+       u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
+       val &= ~CCN_DT_PMCR__PMU_EN;
+       writel(val, ccn->dt.base + CCN_DT_PMCR);
+}
+
 static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
 {
        u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
@@ -1252,6 +1293,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
                .start = arm_ccn_pmu_event_start,
                .stop = arm_ccn_pmu_event_stop,
                .read = arm_ccn_pmu_event_read,
+               .pmu_enable = arm_ccn_pmu_enable,
+               .pmu_disable = arm_ccn_pmu_disable,
        };
 
        /* No overflow interrupt? Have to use a timer instead. */
@@ -1361,6 +1404,8 @@ static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
 
        switch (type) {
        case CCN_TYPE_MN:
+               ccn->mn_id = id;
+               return 0;
        case CCN_TYPE_DT:
                return 0;
        case CCN_TYPE_XP:
@@ -1471,8 +1516,9 @@ static int arm_ccn_probe(struct platform_device *pdev)
                /* Can set 'disable' bits, so can acknowledge interrupts */
                writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
                                ccn->base + CCN_MN_ERRINT_STATUS);
-               err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, 0,
-                               dev_name(ccn->dev), ccn);
+               err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler,
+                                      IRQF_NOBALANCING | IRQF_NO_THREAD,
+                                      dev_name(ccn->dev), ccn);
                if (err)
                        return err;
 
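The arm-ccn hunks above stop restarting the overflow-polling hrtimer in event_start()/event_stop() and instead tie it to the number of live counters, arming it when the first event is added and cancelling it when the last one is removed; the new pmu_enable()/pmu_disable() callbacks additionally gate counting via the PMU_EN bit. A minimal sketch of the timer-refcounting part, assuming a driver-private structure with an hrtimer, an optional IRQ and a counter bitmap (all names below are illustrative, not the CCN driver's):

    /* Sketch: arm the polling timer with the first counter, drop it with the last. */
    static int my_pmu_active_counters(struct my_pmu *pmu)
    {
            return bitmap_weight(pmu->counter_mask, MY_PMU_NUM_COUNTERS);
    }

    static int my_pmu_event_add(struct my_pmu *pmu)
    {
            /* ... allocate a counter and set its bit in pmu->counter_mask ... */

            /* No overflow interrupt available: emulate it with a pinned timer. */
            if (!pmu->irq && my_pmu_active_counters(pmu) == 1)
                    hrtimer_start(&pmu->hrtimer, my_pmu_timer_period(),
                                  HRTIMER_MODE_REL_PINNED);
            return 0;
    }

    static void my_pmu_event_del(struct my_pmu *pmu)
    {
            /* ... release the counter and clear its bit in pmu->counter_mask ... */

            if (!pmu->irq && my_pmu_active_counters(pmu) == 0)
                    hrtimer_cancel(&pmu->hrtimer);
    }
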
index c3cb76b..9efdf1d 100644 (file)
@@ -178,6 +178,7 @@ static int vexpress_config_populate(struct device_node *node)
 
        parent = class_find_device(vexpress_config_class, NULL, bridge,
                        vexpress_config_node_match);
+       of_node_put(bridge);
        if (WARN_ON(!parent))
                return -ENODEV;
 
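The added of_node_put() balances the reference that of_parse_phandle() takes on the bridge node earlier in the function; without it the node's refcount leaks on every probe. The general shape of the pattern, with an illustrative property name and a hypothetical lookup helper:

    struct device_node *bridge;
    struct device *parent;

    bridge = of_parse_phandle(node, "bridge", 0);   /* takes a reference on the node */
    if (!bridge)
            return -EINVAL;

    parent = my_find_parent(bridge);                /* hypothetical lookup that uses the node */
    of_node_put(bridge);                            /* done with the node: drop the reference */
    if (WARN_ON(!parent))
            return -ENODEV;
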
index 56ad5a5..8c0770b 100644 (file)
@@ -244,7 +244,7 @@ config HW_RANDOM_TX4939
 
 config HW_RANDOM_MXC_RNGA
        tristate "Freescale i.MX RNGA Random Number Generator"
-       depends on ARCH_HAS_RNGA
+       depends on SOC_IMX31
        default HW_RANDOM
        ---help---
          This driver provides kernel-side support for the Random Number
index 08c7e23..0c75c3f 100644 (file)
@@ -957,7 +957,7 @@ int tpm2_auto_startup(struct tpm_chip *chip)
                goto out;
 
        rc = tpm2_do_selftest(chip);
-       if (rc != TPM2_RC_INITIALIZE) {
+       if (rc != 0 && rc != TPM2_RC_INITIALIZE) {
                dev_err(&chip->dev, "TPM self test failed\n");
                goto out;
        }
@@ -974,7 +974,6 @@ int tpm2_auto_startup(struct tpm_chip *chip)
                }
        }
 
-       return rc;
 out:
        if (rc > 0)
                rc = -ENODEV;
index d2406fe..5da47e2 100644 (file)
@@ -165,6 +165,12 @@ struct ports_device {
         */
        struct virtqueue *c_ivq, *c_ovq;
 
+       /*
+        * A control packet buffer for guest->host requests, protected
+        * by c_ovq_lock.
+        */
+       struct virtio_console_control cpkt;
+
        /* Array of per-port IO virtqueues */
        struct virtqueue **in_vqs, **out_vqs;
 
@@ -560,28 +566,29 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
                                  unsigned int event, unsigned int value)
 {
        struct scatterlist sg[1];
-       struct virtio_console_control cpkt;
        struct virtqueue *vq;
        unsigned int len;
 
        if (!use_multiport(portdev))
                return 0;
 
-       cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
-       cpkt.event = cpu_to_virtio16(portdev->vdev, event);
-       cpkt.value = cpu_to_virtio16(portdev->vdev, value);
-
        vq = portdev->c_ovq;
 
-       sg_init_one(sg, &cpkt, sizeof(cpkt));
-
        spin_lock(&portdev->c_ovq_lock);
-       if (virtqueue_add_outbuf(vq, sg, 1, &cpkt, GFP_ATOMIC) == 0) {
+
+       portdev->cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
+       portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event);
+       portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value);
+
+       sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control));
+
+       if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) {
                virtqueue_kick(vq);
                while (!virtqueue_get_buf(vq, &len)
                        && !virtqueue_is_broken(vq))
                        cpu_relax();
        }
+
        spin_unlock(&portdev->c_ovq_lock);
        return 0;
 }
index d359c92..e38bf60 100644 (file)
@@ -69,6 +69,7 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
        DEF_FIXED(".s1",        CLK_S1,            CLK_PLL1_DIV2,  3, 1),
        DEF_FIXED(".s2",        CLK_S2,            CLK_PLL1_DIV2,  4, 1),
        DEF_FIXED(".s3",        CLK_S3,            CLK_PLL1_DIV2,  6, 1),
+       DEF_FIXED(".sdsrc",     CLK_SDSRC,         CLK_PLL1_DIV2,  2, 1),
 
        /* Core Clock Outputs */
        DEF_FIXED("ztr",        R8A7795_CLK_ZTR,   CLK_PLL1_DIV2,  6, 1),
@@ -87,10 +88,10 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
        DEF_FIXED("s3d2",       R8A7795_CLK_S3D2,  CLK_S3,         2, 1),
        DEF_FIXED("s3d4",       R8A7795_CLK_S3D4,  CLK_S3,         4, 1),
 
-       DEF_GEN3_SD("sd0",      R8A7795_CLK_SD0,   CLK_PLL1_DIV2, 0x0074),
-       DEF_GEN3_SD("sd1",      R8A7795_CLK_SD1,   CLK_PLL1_DIV2, 0x0078),
-       DEF_GEN3_SD("sd2",      R8A7795_CLK_SD2,   CLK_PLL1_DIV2, 0x0268),
-       DEF_GEN3_SD("sd3",      R8A7795_CLK_SD3,   CLK_PLL1_DIV2, 0x026c),
+       DEF_GEN3_SD("sd0",      R8A7795_CLK_SD0,   CLK_SDSRC,     0x0074),
+       DEF_GEN3_SD("sd1",      R8A7795_CLK_SD1,   CLK_SDSRC,     0x0078),
+       DEF_GEN3_SD("sd2",      R8A7795_CLK_SD2,   CLK_SDSRC,     0x0268),
+       DEF_GEN3_SD("sd3",      R8A7795_CLK_SD3,   CLK_SDSRC,     0x026c),
 
        DEF_FIXED("cl",         R8A7795_CLK_CL,    CLK_PLL1_DIV2, 48, 1),
        DEF_FIXED("cp",         R8A7795_CLK_CP,    CLK_EXTAL,      2, 1),
index c109d80..cdfabeb 100644 (file)
@@ -833,9 +833,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
 
        /* perihp */
        GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
-                       RK3399_CLKGATE_CON(5), 0, GFLAGS),
-       GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
                        RK3399_CLKGATE_CON(5), 1, GFLAGS),
+       GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
+                       RK3399_CLKGATE_CON(5), 0, GFLAGS),
        COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED,
                        RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS,
                        RK3399_CLKGATE_CON(5), 2, GFLAGS),
@@ -923,9 +923,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
                        RK3399_CLKGATE_CON(6), 14, GFLAGS),
 
        GATE(0, "cpll_aclk_emmc_src", "cpll", CLK_IGNORE_UNUSED,
-                       RK3399_CLKGATE_CON(6), 12, GFLAGS),
-       GATE(0, "gpll_aclk_emmc_src", "gpll", CLK_IGNORE_UNUSED,
                        RK3399_CLKGATE_CON(6), 13, GFLAGS),
+       GATE(0, "gpll_aclk_emmc_src", "gpll", CLK_IGNORE_UNUSED,
+                       RK3399_CLKGATE_CON(6), 12, GFLAGS),
        COMPOSITE_NOGATE(ACLK_EMMC, "aclk_emmc", mux_aclk_emmc_p, CLK_IGNORE_UNUSED,
                        RK3399_CLKSEL_CON(21), 7, 1, MFLAGS, 0, 5, DFLAGS),
        GATE(ACLK_EMMC_CORE, "aclk_emmccore", "aclk_emmc", CLK_IGNORE_UNUSED,
@@ -1071,7 +1071,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
        /* vio */
        COMPOSITE(ACLK_VIO, "aclk_vio", mux_pll_src_cpll_gpll_ppll_p, CLK_IGNORE_UNUSED,
                        RK3399_CLKSEL_CON(42), 6, 2, MFLAGS, 0, 5, DFLAGS,
-                       RK3399_CLKGATE_CON(11), 10, GFLAGS),
+                       RK3399_CLKGATE_CON(11), 0, GFLAGS),
        COMPOSITE_NOMUX(PCLK_VIO, "pclk_vio", "aclk_vio", 0,
                        RK3399_CLKSEL_CON(43), 0, 5, DFLAGS,
                        RK3399_CLKGATE_CON(11), 1, GFLAGS),
@@ -1484,6 +1484,7 @@ static const char *const rk3399_cru_critical_clocks[] __initconst = {
        "hclk_perilp1",
        "hclk_perilp1_noc",
        "aclk_dmac0_perilp",
+       "aclk_emmc_noc",
        "gpll_hclk_perilp1_src",
        "gpll_aclk_perilp0_src",
        "gpll_aclk_perihp_src",
index fc17b52..51d4bac 100644 (file)
@@ -31,7 +31,7 @@ void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
                return;
 
        WARN_ON(readl_relaxed_poll_timeout(common->base + common->reg, reg,
-                                          !(reg & lock), 100, 70000));
+                                          reg & lock, 100, 70000));
 }
 
 int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
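
The sunxi-ng fix above inverts the polled condition: ccu_helper_wait_for_lock() is supposed to wait until the PLL's lock bit becomes set, so the expression given to readl_relaxed_poll_timeout() must be "reg & lock", not its negation. Typical usage of the iopoll helper, with illustrative base/register/mask names (poll every 100us, give up after 70ms):

    u32 reg;

    if (readl_relaxed_poll_timeout(base + pll_reg, reg, reg & lock_mask,
                                   100, 70000))
            pr_warn("PLL failed to lock\n");   /* helper returns -ETIMEDOUT on timeout */
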
index 64da7b7..933b5dd 100644 (file)
@@ -428,7 +428,7 @@ static struct tegra_clk_pll_params pll_d_params = {
        .div_nmp = &pllp_nmp,
        .freq_table = pll_d_freq_table,
        .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
-                TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+                TEGRA_PLL_HAS_LOCK_ENABLE,
 };
 
 static struct tegra_clk_pll_params pll_d2_params = {
@@ -446,7 +446,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
        .div_nmp = &pllp_nmp,
        .freq_table = pll_d_freq_table,
        .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
-                TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+                TEGRA_PLL_HAS_LOCK_ENABLE,
 };
 
 static const struct pdiv_map pllu_p[] = {
index 3494bc5..7f0f5b2 100644 (file)
@@ -240,6 +240,7 @@ static int __init at91sam926x_pit_common_init(struct pit_data *data)
 static int __init at91sam926x_pit_dt_init(struct device_node *node)
 {
        struct pit_data *data;
+       int ret;
 
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
index 0bb44d5..2ee40fd 100644 (file)
@@ -74,6 +74,8 @@ static const struct of_device_id machines[] __initconst = {
        { .compatible = "ti,omap5", },
 
        { .compatible = "xlnx,zynq-7000", },
+
+       { }
 };
 
 static int __init cpufreq_dt_platdev_init(void)
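
The one-line cpufreq-dt-platdev fix adds the terminating empty entry that every of_device_id table needs: the OF match helpers walk the array until they reach a zeroed sentinel, so omitting it lets the walk run off the end of the table. Shape of a correctly terminated table (compatible strings are illustrative):

    static const struct of_device_id my_machines[] __initconst = {
            { .compatible = "vendor,board-a", },
            { .compatible = "vendor,board-b", },
            { /* sentinel */ }
    };
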
index 6dc5971..b304421 100644 (file)
@@ -556,7 +556,10 @@ skip_enc:
 
        /* Read and write assoclen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-       append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+       if (alg->caam.geniv)
+               append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+       else
+               append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
        /* Skip assoc data */
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -565,6 +568,14 @@ skip_enc:
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);
 
+       if (alg->caam.geniv) {
+               append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+                               LDST_SRCDST_BYTE_CONTEXT |
+                               (ctx1_iv_off << LDST_OFFSET_SHIFT));
+               append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+                           (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+       }
+
        /* Load Counter into CONTEXT1 reg */
        if (is_rfc3686)
                append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
@@ -2150,7 +2161,7 @@ static void init_authenc_job(struct aead_request *req,
 
        init_aead_job(req, edesc, all_contig, encrypt);
 
-       if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
+       if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
                append_load_as_imm(desc, req->iv, ivsize,
                                   LDST_CLASS_1_CCB |
                                   LDST_SRCDST_BYTE_CONTEXT |
@@ -2537,20 +2548,6 @@ static int aead_decrypt(struct aead_request *req)
        return ret;
 }
 
-static int aead_givdecrypt(struct aead_request *req)
-{
-       struct crypto_aead *aead = crypto_aead_reqtfm(req);
-       unsigned int ivsize = crypto_aead_ivsize(aead);
-
-       if (req->cryptlen < ivsize)
-               return -EINVAL;
-
-       req->cryptlen -= ivsize;
-       req->assoclen += ivsize;
-
-       return aead_decrypt(req);
-}
-
 /*
  * allocate and map the ablkcipher extended descriptor for ablkcipher
  */
@@ -3210,7 +3207,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
@@ -3256,7 +3253,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
@@ -3302,7 +3299,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
@@ -3348,7 +3345,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
@@ -3394,7 +3391,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
@@ -3440,7 +3437,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
@@ -3486,7 +3483,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
@@ -3534,7 +3531,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
@@ -3582,7 +3579,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
@@ -3630,7 +3627,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
@@ -3678,7 +3675,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
@@ -3726,7 +3723,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
@@ -3772,7 +3769,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
@@ -3818,7 +3815,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
@@ -3864,7 +3861,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
@@ -3910,7 +3907,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
@@ -3956,7 +3953,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
@@ -4002,7 +3999,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
@@ -4051,7 +4048,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
@@ -4102,7 +4099,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
@@ -4153,7 +4150,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
@@ -4204,7 +4201,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
@@ -4255,7 +4252,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
@@ -4306,7 +4303,7 @@ static struct caam_aead_alg driver_aeads[] = {
                        .setkey = aead_setkey,
                        .setauthsize = aead_setauthsize,
                        .encrypt = aead_encrypt,
-                       .decrypt = aead_givdecrypt,
+                       .decrypt = aead_decrypt,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
index 769148d..20f35df 100644 (file)
@@ -1260,8 +1260,8 @@ static struct crypto_alg qat_algs[] = { {
                        .setkey = qat_alg_ablkcipher_xts_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
-                       .min_keysize = AES_MIN_KEY_SIZE,
-                       .max_keysize = AES_MAX_KEY_SIZE,
+                       .min_keysize = 2 * AES_MIN_KEY_SIZE,
+                       .max_keysize = 2 * AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
index cfb2541..24353ec 100644 (file)
@@ -129,8 +129,8 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
 
                blkcipher_walk_init(&walk, dst, src, nbytes);
 
-               iv = (u8 *)walk.iv;
                ret = blkcipher_walk_virt(desc, &walk);
+               iv = walk.iv;
                memset(tweak, 0, AES_BLOCK_SIZE);
                aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
 
index 803f395..29f600f 100644 (file)
@@ -459,7 +459,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
        }
 
        pgoff = linear_page_index(vma, pmd_addr);
-       phys = pgoff_to_phys(dax_dev, pgoff, PAGE_SIZE);
+       phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
                                pgoff);
index e434ffe..832cbd6 100644 (file)
@@ -2067,7 +2067,7 @@ err_dma_unregister:
 err_clk_disable:
        clk_disable_unprepare(atxdmac->clk);
 err_free_irq:
-       free_irq(atxdmac->irq, atxdmac->dma.dev);
+       free_irq(atxdmac->irq, atxdmac);
        return ret;
 }
 
@@ -2081,7 +2081,7 @@ static int at_xdmac_remove(struct platform_device *pdev)
        dma_async_device_unregister(&atxdmac->dma);
        clk_disable_unprepare(atxdmac->clk);
 
-       free_irq(atxdmac->irq, atxdmac->dma.dev);
+       free_irq(atxdmac->irq, atxdmac);
 
        for (i = 0; i < atxdmac->dma.chancnt; i++) {
                struct at_xdmac_chan *atchan = &atxdmac->chan[i];
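
free_irq() identifies which handler to remove by the dev_id cookie, so it must be given exactly the pointer that was passed to request_irq(); the at_xdmac hunks fix a mismatch between the two (atxdmac->dma.dev vs. atxdmac). The pairing, with illustrative names:

    ret = request_irq(irq, my_irq_handler, 0, dev_name(dev), mydev);
    if (ret)
            return ret;

    /* ... */

    free_irq(irq, mydev);   /* must be the same cookie, or the handler is not released */
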
index aad167e..de2a2a2 100644 (file)
@@ -836,6 +836,7 @@ static int fsl_re_probe(struct platform_device *ofdev)
                rc = of_property_read_u32(np, "reg", &off);
                if (rc) {
                        dev_err(dev, "Reg property not found in JQ node\n");
+                       of_node_put(np);
                        return -ENODEV;
                }
                /* Find out the Job Rings present under each JQ */
index a4c53be..624f1e1 100644 (file)
@@ -861,7 +861,6 @@ static int mdc_dma_probe(struct platform_device *pdev)
 {
        struct mdc_dma *mdma;
        struct resource *res;
-       const struct of_device_id *match;
        unsigned int i;
        u32 val;
        int ret;
@@ -871,8 +870,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
                return -ENOMEM;
        platform_set_drvdata(pdev, mdma);
 
-       match = of_match_device(mdc_dma_of_match, &pdev->dev);
-       mdma->soc = match->data;
+       mdma->soc = of_device_get_match_data(&pdev->dev);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mdma->regs = devm_ioremap_resource(&pdev->dev, res);
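
of_device_get_match_data() folds the of_match_device() lookup and the ->data dereference into a single call that returns NULL when there is no match data, which is what the img-mdc probe switches to above. Minimal probe-side usage, with illustrative names:

    static int my_probe(struct platform_device *pdev)
    {
            struct my_priv *priv;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            priv->soc = of_device_get_match_data(&pdev->dev);  /* NULL if nothing matched */
            /* ... */
            return 0;
    }
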
index dc7850a..3f56f9c 100644 (file)
@@ -638,7 +638,7 @@ static bool pxad_try_hotchain(struct virt_dma_chan *vc,
                vd_last_issued = list_entry(vc->desc_issued.prev,
                                            struct virt_dma_desc, node);
                pxad_desc_chain(vd_last_issued, vd);
-               if (is_chan_running(chan) || is_desc_completed(vd_last_issued))
+               if (is_chan_running(chan) || is_desc_completed(vd))
                        return true;
        }
 
@@ -671,6 +671,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
        struct virt_dma_desc *vd, *tmp;
        unsigned int dcsr;
        unsigned long flags;
+       bool vd_completed;
        dma_cookie_t last_started = 0;
 
        BUG_ON(!chan);
@@ -681,15 +682,17 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
 
        spin_lock_irqsave(&chan->vc.lock, flags);
        list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
+               vd_completed = is_desc_completed(vd);
                dev_dbg(&chan->vc.chan.dev->device,
-                       "%s(): checking txd %p[%x]: completed=%d\n",
-                       __func__, vd, vd->tx.cookie, is_desc_completed(vd));
+                       "%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
+                       __func__, vd, vd->tx.cookie, vd_completed,
+                       dcsr);
                last_started = vd->tx.cookie;
                if (to_pxad_sw_desc(vd)->cyclic) {
                        vchan_cyclic_callback(vd);
                        break;
                }
-               if (is_desc_completed(vd)) {
+               if (vd_completed) {
                        list_del(&vd->node);
                        vchan_cookie_complete(vd);
                } else {
index 749f1bd..06ecdc3 100644 (file)
@@ -600,27 +600,30 @@ static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
 {
        struct usb_dmac_chan *chan = dev;
        irqreturn_t ret = IRQ_NONE;
-       u32 mask = USB_DMACHCR_TE;
-       u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP;
+       u32 mask = 0;
        u32 chcr;
+       bool xfer_end = false;
 
        spin_lock(&chan->vc.lock);
 
        chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
-       if (chcr & check_bits)
-               mask |= USB_DMACHCR_DE | check_bits;
+       if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
+               mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
+               if (chcr & USB_DMACHCR_DE)
+                       xfer_end = true;
+               ret |= IRQ_HANDLED;
+       }
        if (chcr & USB_DMACHCR_NULL) {
                /* An interruption of TE will happen after we set FTE */
                mask |= USB_DMACHCR_NULL;
                chcr |= USB_DMACHCR_FTE;
                ret |= IRQ_HANDLED;
        }
-       usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
+       if (mask)
+               usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
 
-       if (chcr & check_bits) {
+       if (xfer_end)
                usb_dmac_isr_transfer_end(chan);
-               ret |= IRQ_HANDLED;
-       }
 
        spin_unlock(&chan->vc.lock);
 
index 4388937..ce2bc2a 100644 (file)
@@ -709,9 +709,10 @@ static int scpi_probe(struct platform_device *pdev)
                struct mbox_client *cl = &pchan->cl;
                struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
 
-               if (of_address_to_resource(shmem, 0, &res)) {
+               ret = of_address_to_resource(shmem, 0, &res);
+               of_node_put(shmem);
+               if (ret) {
                        dev_err(dev, "failed to get SCPI payload mem resource\n");
-                       ret = -EINVAL;
                        goto err;
                }
 
index 94a58a0..44c0139 100644 (file)
@@ -229,14 +229,14 @@ static int __init dmi_id_init(void)
 
        ret = device_register(dmi_dev);
        if (ret)
-               goto fail_free_dmi_dev;
+               goto fail_put_dmi_dev;
 
        return 0;
 
-fail_free_dmi_dev:
-       kfree(dmi_dev);
-fail_class_unregister:
+fail_put_dmi_dev:
+       put_device(dmi_dev);
 
+fail_class_unregister:
        class_unregister(&dmi_class);
 
        return ret;
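
Once device_register() has been called, the struct device is reference counted and owned by the driver core, so even on failure it must be released with put_device() rather than kfree(); the device's release callback then frees the underlying memory. Sketch of the corrected error path (names illustrative):

    ret = device_register(my_dev);
    if (ret) {
            put_device(my_dev);   /* never kfree() after device_register(), even on error */
            return ret;
    }
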
index 66a9410..24caedb 100644 (file)
@@ -1131,6 +1131,7 @@ menu "SPI or I2C GPIO expanders"
 
 config GPIO_MCP23S08
        tristate "Microchip MCP23xxx I/O expander"
+       depends on OF_GPIO
        select GPIOLIB_IRQCHIP
        help
          SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
index ac22efc..99d37b5 100644 (file)
@@ -564,7 +564,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
        mcp->chip.direction_output = mcp23s08_direction_output;
        mcp->chip.set = mcp23s08_set;
        mcp->chip.dbg_show = mcp23s08_dbg_show;
-#ifdef CONFIG_OF
+#ifdef CONFIG_OF_GPIO
        mcp->chip.of_gpio_n_cells = 2;
        mcp->chip.of_node = dev->of_node;
 #endif
index 0c99e8f..8d8ee0e 100644 (file)
@@ -155,7 +155,7 @@ static int sa1100_gpio_irqdomain_map(struct irq_domain *d,
 {
        irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip,
                                 handle_edge_irq);
-       irq_set_noprobe(irq);
+       irq_set_probe(irq);
 
        return 0;
 }
index 75e7b39..a28feb3 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/io.h>
-#include <linux/io-mapping.h>
 #include <linux/gpio/consumer.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
index a31d7ef..ec1282a 100644 (file)
@@ -280,7 +280,7 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 {
        unsigned i;
-       int r;
+       int r, ret = 0;
 
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
@@ -301,10 +301,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
                        } else {
                                /* still not good, but we can live with it */
                                DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
+                               ret = r;
                        }
                }
        }
-       return 0;
+       return ret;
 }
 
 /*
index ee64669..77fdd99 100644 (file)
@@ -52,6 +52,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static int cik_sdma_soft_reset(void *handle);
 
 MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
 MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
@@ -1037,6 +1038,8 @@ static int cik_sdma_resume(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       cik_sdma_soft_reset(handle);
+
        return cik_sdma_hw_init(adev);
 }
 
index d869d05..425413f 100644 (file)
@@ -2755,8 +2755,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
        u64 wb_gpu_addr;
        u32 *buf;
        struct bonaire_mqd *mqd;
-
-       gfx_v7_0_cp_compute_enable(adev, true);
+       struct amdgpu_ring *ring;
 
        /* fix up chicken bits */
        tmp = RREG32(mmCP_CPF_DEBUG);
@@ -2791,7 +2790,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 
        /* init the queues.  Just two for now. */
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-               struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+               ring = &adev->gfx.compute_ring[i];
 
                if (ring->mqd_obj == NULL) {
                        r = amdgpu_bo_create(adev,
@@ -2970,6 +2969,13 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
                amdgpu_bo_unreserve(ring->mqd_obj);
 
                ring->ready = true;
+       }
+
+       gfx_v7_0_cp_compute_enable(adev, true);
+
+       for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+               ring = &adev->gfx.compute_ring[i];
+
                r = amdgpu_ring_test_ring(ring);
                if (r)
                        ring->ready = false;
index 9f7dafc..7bf90e9 100644 (file)
@@ -171,10 +171,34 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
        drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
 }
 
+static int imx_drm_atomic_check(struct drm_device *dev,
+                               struct drm_atomic_state *state)
+{
+       int ret;
+
+       ret = drm_atomic_helper_check_modeset(dev, state);
+       if (ret)
+               return ret;
+
+       ret = drm_atomic_helper_check_planes(dev, state);
+       if (ret)
+               return ret;
+
+       /*
+        * Check modeset again in case crtc_state->mode_changed is
+        * updated in plane's ->atomic_check callback.
+        */
+       ret = drm_atomic_helper_check_modeset(dev, state);
+       if (ret)
+               return ret;
+
+       return ret;
+}
+
 static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
        .fb_create = drm_fb_cma_create,
        .output_poll_changed = imx_drm_output_poll_changed,
-       .atomic_check = drm_atomic_helper_check,
+       .atomic_check = imx_drm_atomic_check,
        .atomic_commit = drm_atomic_helper_commit,
 };
 
index 08e188b..462056e 100644 (file)
@@ -76,6 +76,8 @@ static void ipu_crtc_disable(struct drm_crtc *crtc)
                crtc->state->event = NULL;
        }
        spin_unlock_irq(&crtc->dev->event_lock);
+
+       drm_crtc_vblank_off(crtc);
 }
 
 static void imx_drm_crtc_reset(struct drm_crtc *crtc)
@@ -175,6 +177,8 @@ static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
 static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
                                  struct drm_crtc_state *old_crtc_state)
 {
+       drm_crtc_vblank_on(crtc);
+
        spin_lock_irq(&crtc->dev->event_lock);
        if (crtc->state->event) {
                WARN_ON(drm_crtc_vblank_get(crtc));
index 4ad67d0..29423e7 100644 (file)
@@ -319,13 +319,14 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
                return -EINVAL;
 
        /*
-        * since we cannot touch active IDMAC channels, we do not support
-        * resizing the enabled plane or changing its format
+        * We support resizing active plane or changing its format by
+        * forcing CRTC mode change and disabling-enabling plane in plane's
+        * ->atomic_update callback.
         */
        if (old_fb && (state->src_w != old_state->src_w ||
                              state->src_h != old_state->src_h ||
                              fb->pixel_format != old_fb->pixel_format))
-               return -EINVAL;
+               crtc_state->mode_changed = true;
 
        eba = drm_plane_state_to_eba(state);
 
@@ -336,7 +337,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
                return -EINVAL;
 
        if (old_fb && fb->pitches[0] != old_fb->pitches[0])
-               return -EINVAL;
+               crtc_state->mode_changed = true;
 
        switch (fb->pixel_format) {
        case DRM_FORMAT_YUV420:
@@ -372,7 +373,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
                        return -EINVAL;
 
                if (old_fb && old_fb->pitches[1] != fb->pitches[1])
-                       return -EINVAL;
+                       crtc_state->mode_changed = true;
        }
 
        return 0;
@@ -392,8 +393,14 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
        enum ipu_color_space ics;
 
        if (old_state->fb) {
-               ipu_plane_atomic_set_base(ipu_plane, old_state);
-               return;
+               struct drm_crtc_state *crtc_state = state->crtc->state;
+
+               if (!crtc_state->mode_changed) {
+                       ipu_plane_atomic_set_base(ipu_plane, old_state);
+                       return;
+               }
+
+               ipu_disable_plane(plane);
        }
 
        switch (ipu_plane->dp_flow) {
index b4bc7f1..d0da52f 100644 (file)
@@ -157,6 +157,12 @@ struct msm_drm_private {
        struct shrinker shrinker;
 
        struct msm_vblank_ctrl vblank_ctrl;
+
+       /* task holding struct_mutex.. currently only used in submit path
+        * to detect and reject faults from copy_from_user() for submit
+        * ioctl.
+        */
+       struct task_struct *struct_mutex_task;
 };
 
 struct msm_format {
index 6cd4af4..85f3047 100644 (file)
@@ -196,11 +196,20 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
+       struct msm_drm_private *priv = dev->dev_private;
        struct page **pages;
        unsigned long pfn;
        pgoff_t pgoff;
        int ret;
 
+       /* This should only happen if userspace tries to pass a mmap'd
+        * but unfaulted gem bo vaddr into submit ioctl, triggering
+        * a page fault while struct_mutex is already held.  This is
+        * not a valid use-case so just bail.
+        */
+       if (priv->struct_mutex_task == current)
+               return VM_FAULT_SIGBUS;
+
        /* Make sure we don't parallel update on a fault, nor move or remove
         * something from beneath our feet
         */
index 9766f9a..880d6a9 100644 (file)
@@ -64,6 +64,14 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
        kfree(submit);
 }
 
+static inline unsigned long __must_check
+copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+       if (access_ok(VERIFY_READ, from, n))
+               return __copy_from_user_inatomic(to, from, n);
+       return -EFAULT;
+}
+
 static int submit_lookup_objects(struct msm_gem_submit *submit,
                struct drm_msm_gem_submit *args, struct drm_file *file)
 {
@@ -71,6 +79,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
        int ret = 0;
 
        spin_lock(&file->table_lock);
+       pagefault_disable();
 
        for (i = 0; i < args->nr_bos; i++) {
                struct drm_msm_gem_submit_bo submit_bo;
@@ -84,10 +93,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                 */
                submit->bos[i].flags = 0;
 
-               ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
-               if (ret) {
-                       ret = -EFAULT;
-                       goto out_unlock;
+               ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
+               if (unlikely(ret)) {
+                       pagefault_enable();
+                       spin_unlock(&file->table_lock);
+                       ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+                       if (ret)
+                               goto out;
+                       spin_lock(&file->table_lock);
+                       pagefault_disable();
                }
 
                if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
@@ -127,9 +141,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
        }
 
 out_unlock:
-       submit->nr_bos = i;
+       pagefault_enable();
        spin_unlock(&file->table_lock);
 
+out:
+       submit->nr_bos = i;
+
        return ret;
 }
 
@@ -377,6 +394,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
+       priv->struct_mutex_task = current;
+
        submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
        if (!submit) {
                ret = -ENOMEM;
@@ -468,6 +487,7 @@ out:
        if (ret)
                msm_gem_submit_free(submit);
 out_unlock:
+       priv->struct_mutex_task = NULL;
        mutex_unlock(&dev->struct_mutex);
        return ret;
 }
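
The msm_gem_submit changes avoid taking a page fault while file->table_lock is held, since servicing the fault could re-enter the driver and deadlock on struct_mutex (the new struct_mutex_task check turns that case into SIGBUS). User data is therefore copied with page faults disabled, and only on failure are the locks dropped for an ordinary copy_from_user() retry. The core of that pattern, with illustrative names and an access_ok() check as in the helper added above:

    if (!access_ok(VERIFY_READ, userptr, sizeof(bo)))
            return -EFAULT;

    pagefault_disable();
    ret = __copy_from_user_inatomic(&bo, userptr, sizeof(bo));
    pagefault_enable();

    if (unlikely(ret)) {
            /* Slow path: release the spinlock so the fault can actually be serviced. */
            spin_unlock(&file->table_lock);
            if (copy_from_user(&bo, userptr, sizeof(bo)))
                    return -EFAULT;
            spin_lock(&file->table_lock);
    }
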
index f2ad17a..dc57b62 100644 (file)
@@ -225,6 +225,17 @@ static bool nouveau_pr3_present(struct pci_dev *pdev)
        if (!parent_pdev)
                return false;
 
+       if (!parent_pdev->bridge_d3) {
+               /*
+                * Parent PCI bridge is currently not power managed.
+                * Since userspace can change these afterwards to be on
+                * the safe side we stick with _DSM and prevent usage of
+                * _PR3 from the bridge.
+                */
+               pci_d3cold_disable(pdev);
+               return false;
+       }
+
        parent_adev = ACPI_COMPANION(&parent_pdev->dev);
        if (!parent_adev)
                return false;
index 8b42d31..9ecef93 100644 (file)
@@ -57,21 +57,21 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
        switch (args->param) {
        case DRM_VC4_PARAM_V3D_IDENT0:
                ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
-               if (ret)
+               if (ret < 0)
                        return ret;
                args->value = V3D_READ(V3D_IDENT0);
                pm_runtime_put(&vc4->v3d->pdev->dev);
                break;
        case DRM_VC4_PARAM_V3D_IDENT1:
                ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
-               if (ret)
+               if (ret < 0)
                        return ret;
                args->value = V3D_READ(V3D_IDENT1);
                pm_runtime_put(&vc4->v3d->pdev->dev);
                break;
        case DRM_VC4_PARAM_V3D_IDENT2:
                ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
-               if (ret)
+               if (ret < 0)
                        return ret;
                args->value = V3D_READ(V3D_IDENT2);
                pm_runtime_put(&vc4->v3d->pdev->dev);
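
pm_runtime_get_sync() legitimately returns a positive value when the device was already resumed, so treating any non-zero return as an error turns success into failure; the vc4 fix checks only for negative returns. Typical usage (device pointer illustrative):

    ret = pm_runtime_get_sync(dev);
    if (ret < 0)
            return ret;        /* only a negative value means the resume failed */

    /* ... access the hardware ... */

    pm_runtime_put(dev);
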
index 489e3de..428e249 100644 (file)
@@ -321,6 +321,15 @@ vc4_first_render_job(struct vc4_dev *vc4)
                                struct vc4_exec_info, head);
 }
 
+static inline struct vc4_exec_info *
+vc4_last_render_job(struct vc4_dev *vc4)
+{
+       if (list_empty(&vc4->render_job_list))
+               return NULL;
+       return list_last_entry(&vc4->render_job_list,
+                              struct vc4_exec_info, head);
+}
+
 /**
  * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
  * setup parameters.
index 6155e8a..b262c5c 100644 (file)
@@ -534,8 +534,8 @@ vc4_cl_lookup_bos(struct drm_device *dev,
                return -EINVAL;
        }
 
-       exec->bo = kcalloc(exec->bo_count, sizeof(struct drm_gem_cma_object *),
-                          GFP_KERNEL);
+       exec->bo = drm_calloc_large(exec->bo_count,
+                                   sizeof(struct drm_gem_cma_object *));
        if (!exec->bo) {
                DRM_ERROR("Failed to allocate validated BO pointers\n");
                return -ENOMEM;
@@ -572,8 +572,8 @@ vc4_cl_lookup_bos(struct drm_device *dev,
        spin_unlock(&file_priv->table_lock);
 
 fail:
-       kfree(handles);
-       return 0;
+       drm_free_large(handles);
+       return ret;
 }
 
 static int
@@ -608,7 +608,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
         * read the contents back for validation, and I think the
         * bo->vaddr is uncached access.
         */
-       temp = kmalloc(temp_size, GFP_KERNEL);
+       temp = drm_malloc_ab(temp_size, 1);
        if (!temp) {
                DRM_ERROR("Failed to allocate storage for copying "
                          "in bin/render CLs.\n");
@@ -675,7 +675,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
        ret = vc4_validate_shader_recs(dev, exec);
 
 fail:
-       kfree(temp);
+       drm_free_large(temp);
        return ret;
 }
 
@@ -688,7 +688,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
        if (exec->bo) {
                for (i = 0; i < exec->bo_count; i++)
                        drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
-               kfree(exec->bo);
+               drm_free_large(exec->bo);
        }
 
        while (!list_empty(&exec->unref_list)) {
@@ -942,8 +942,8 @@ vc4_gem_destroy(struct drm_device *dev)
                vc4->overflow_mem = NULL;
        }
 
-       vc4_bo_cache_destroy(dev);
-
        if (vc4->hang_state)
                vc4_free_hang_state(dev, vc4->hang_state);
+
+       vc4_bo_cache_destroy(dev);
 }
index b0104a3..094bc6a 100644 (file)
@@ -83,8 +83,10 @@ vc4_overflow_mem_work(struct work_struct *work)
 
                spin_lock_irqsave(&vc4->job_lock, irqflags);
                current_exec = vc4_first_bin_job(vc4);
+               if (!current_exec)
+                       current_exec = vc4_last_render_job(vc4);
                if (current_exec) {
-                       vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
+                       vc4->overflow_mem->seqno = current_exec->seqno;
                        list_add_tail(&vc4->overflow_mem->unref_head,
                                      &current_exec->unref_list);
                        vc4->overflow_mem = NULL;
index f987432..258cb9a 100644 (file)
@@ -643,7 +643,7 @@ static int bcm_kona_i2c_xfer(struct i2c_adapter *adapter,
                        if (rc < 0) {
                                dev_err(dev->device,
                                        "restart cmd failed rc = %d\n", rc);
-                                       goto xfer_send_stop;
+                               goto xfer_send_stop;
                        }
                }
 
index 90bbd9f..3c16a2f 100644 (file)
@@ -767,7 +767,7 @@ static int cdns_i2c_setclk(unsigned long clk_in, struct cdns_i2c *id)
  * depending on the scaling direction.
  *
  * Return:     NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
- *             to acknowedge the change, NOTIFY_DONE if the notification is
+ *             to acknowledge the change, NOTIFY_DONE if the notification is
  *             considered irrelevant.
  */
 static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
index c6922b8..fcd973d 100644 (file)
@@ -367,13 +367,17 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
 
        /* Configure SDA Hold Time if required */
-       if (dev->sda_hold_time) {
-               reg = dw_readl(dev, DW_IC_COMP_VERSION);
-               if (reg >= DW_IC_SDA_HOLD_MIN_VERS)
+       reg = dw_readl(dev, DW_IC_COMP_VERSION);
+       if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
+               if (dev->sda_hold_time) {
                        dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
-               else
-                       dev_warn(dev->dev,
-                               "Hardware too old to adjust SDA hold time.");
+               } else {
+                       /* Keep previous hold time setting if no one set it */
+                       dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
+               }
+       } else {
+               dev_warn(dev->dev,
+                       "Hardware too old to adjust SDA hold time.\n");
        }
 
        /* Configure Tx/Rx FIFO threshold levels */
index 52407f3..9bd849d 100644 (file)
@@ -378,7 +378,7 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
        }
 
        dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
-       if (dma_mapping_error(dev, dma_addr)) {
+       if (dma_mapping_error(chan->device->dev, dma_addr)) {
                dev_dbg(dev, "dma map failed, using PIO\n");
                return;
        }
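
dma_mapping_error() has to be asked about the same device that created the mapping; the rcar-i2c buffer is mapped with the DMA engine's device (chan->device->dev), so checking the error against the I2C controller's own dev is wrong, and DMA-API debugging will flag it. The pairing looks like:

    dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
    if (dma_mapping_error(chan->device->dev, dma_addr)) {   /* same device as the map */
            dev_dbg(dev, "dma map failed, using PIO\n");
            return;
    }
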
index 2bc8b01..5c5b7ca 100644 (file)
@@ -918,7 +918,7 @@ static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate)
  * Code adapted from i2c-cadence.c.
  *
  * Return:     NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
- *             to acknowedge the change, NOTIFY_DONE if the notification is
+ *             to acknowledge the change, NOTIFY_DONE if the notification is
  *             considered irrelevant.
  */
 static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
@@ -1111,6 +1111,15 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
        return ret < 0 ? ret : num;
 }
 
+static __maybe_unused int rk3x_i2c_resume(struct device *dev)
+{
+       struct rk3x_i2c *i2c = dev_get_drvdata(dev);
+
+       rk3x_i2c_adapt_div(i2c, clk_get_rate(i2c->clk));
+
+       return 0;
+}
+
 static u32 rk3x_i2c_func(struct i2c_adapter *adap)
 {
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
@@ -1334,12 +1343,15 @@ static int rk3x_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(rk3x_i2c_pm_ops, NULL, rk3x_i2c_resume);
+
 static struct platform_driver rk3x_i2c_driver = {
        .probe   = rk3x_i2c_probe,
        .remove  = rk3x_i2c_remove,
        .driver  = {
                .name  = "rk3x-i2c",
                .of_match_table = rk3x_i2c_match,
+               .pm = &rk3x_i2c_pm_ops,
        },
 };
 
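The new rk3x resume hook recomputes the bus divider because the input clock rate can change while the controller is suspended. It is wired up through SIMPLE_DEV_PM_OPS(), whose sleep callbacks are only referenced when CONFIG_PM_SLEEP is enabled, hence the __maybe_unused annotation instead of an #ifdef. Shape of the wiring (driver names illustrative):

    static __maybe_unused int my_resume(struct device *dev)
    {
            /* reprogram whatever state the hardware lost over suspend */
            return 0;
    }

    /* Sleep callbacks are referenced only when CONFIG_PM_SLEEP is set. */
    static SIMPLE_DEV_PM_OPS(my_pm_ops, NULL, my_resume);

    static struct platform_driver my_driver = {
            .driver = {
                    .name = "my-driver",
                    .pm   = &my_pm_ops,
            },
    };
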
index 6fb3e26..05b1eea 100644 (file)
@@ -610,7 +610,7 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
                return;
 
        dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir);
-       if (dma_mapping_error(pd->dev, dma_addr)) {
+       if (dma_mapping_error(chan->device->dev, dma_addr)) {
                dev_dbg(pd->dev, "dma map failed, using PIO\n");
                return;
        }
index 215ac87..b3893f6 100644 (file)
@@ -37,8 +37,6 @@ struct i2c_demux_pinctrl_priv {
        struct i2c_demux_pinctrl_chan chan[];
 };
 
-static struct property status_okay = { .name = "status", .length = 3, .value = "ok" };
-
 static int i2c_demux_master_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
 {
        struct i2c_demux_pinctrl_priv *priv = adap->algo_data;
@@ -107,6 +105,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
        of_changeset_revert(&priv->chan[new_chan].chgset);
  err:
        dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret);
+       priv->cur_chan = -EINVAL;
        return ret;
 }
 
@@ -192,6 +191,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct i2c_demux_pinctrl_priv *priv;
+       struct property *props;
        int num_chan, i, j, err;
 
        num_chan = of_count_phandle_with_args(np, "i2c-parent", NULL);
@@ -202,7 +202,10 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
 
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv)
                           + num_chan * sizeof(struct i2c_demux_pinctrl_chan), GFP_KERNEL);
-       if (!priv)
+
+       props = devm_kcalloc(&pdev->dev, num_chan, sizeof(*props), GFP_KERNEL);
+
+       if (!priv || !props)
                return -ENOMEM;
 
        err = of_property_read_string(np, "i2c-bus-name", &priv->bus_name);
@@ -220,8 +223,12 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
                }
                priv->chan[i].parent_np = adap_np;
 
+               props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL);
+               props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL);
+               props[i].length = 3;
+
                of_changeset_init(&priv->chan[i].chgset);
-               of_changeset_update_property(&priv->chan[i].chgset, adap_np, &status_okay);
+               of_changeset_update_property(&priv->chan[i].chgset, adap_np, &props[i]);
        }
 
        priv->num_chan = num_chan;
index 89d7820..78f148e 100644 (file)
@@ -20,6 +20,8 @@ config BMA180
 config BMA220
     tristate "Bosch BMA220 3-Axis Accelerometer Driver"
        depends on SPI
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
     help
       Say yes here to add support for the Bosch BMA220 triaxial
       acceleration sensor.
@@ -234,7 +236,8 @@ config STK8312
 config STK8BA50
        tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver"
        depends on I2C
-       depends on IIO_TRIGGER
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
        help
          Say yes here to get support for the Sensortek STK8BA50 3-axis
          accelerometer.
index 1098d10..5099f29 100644 (file)
@@ -253,7 +253,7 @@ static int bma220_probe(struct spi_device *spi)
        if (ret < 0)
                return ret;
 
-       ret = iio_triggered_buffer_setup(indio_dev, NULL,
+       ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
                                         bma220_trigger_handler, NULL);
        if (ret < 0) {
                dev_err(&spi->dev, "iio triggered buffer setup failed\n");
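
Passing iio_pollfunc_store_time as the top-half handler lets the IIO core record the timestamp at trigger time, so samples pushed from the bottom half carry a valid timestamp. Sketch of the registration call in a probe path (my_trigger_handler is a placeholder for the driver's own bottom half):

ret = iio_triggered_buffer_setup(indio_dev,
                                 iio_pollfunc_store_time, /* top half: stash the timestamp */
                                 my_trigger_handler,      /* bottom half: read and push data */
                                 NULL);
if (ret)
        return ret;
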
index bf17aae..59b380d 100644 (file)
@@ -67,6 +67,9 @@
 #define BMC150_ACCEL_REG_PMU_BW                0x10
 #define BMC150_ACCEL_DEF_BW                    125
 
+#define BMC150_ACCEL_REG_RESET                 0x14
+#define BMC150_ACCEL_RESET_VAL                 0xB6
+
 #define BMC150_ACCEL_REG_INT_MAP_0             0x19
 #define BMC150_ACCEL_INT_MAP_0_BIT_SLOPE       BIT(2)
 
@@ -1497,6 +1500,14 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
        int ret, i;
        unsigned int val;
 
+       /*
+        * Reset chip to get it in a known good state. A delay of 1.8ms after
+        * reset is required according to the data sheets of supported chips.
+        */
+       regmap_write(data->regmap, BMC150_ACCEL_REG_RESET,
+                    BMC150_ACCEL_RESET_VAL);
+       usleep_range(1800, 2500);
+
        ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val);
        if (ret < 0) {
                dev_err(dev, "Error: Reading chip id\n");
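
The added comment already carries the rationale; reduced to its essentials, the init order this hunk establishes is soft reset, the datasheet's 1.8 ms settle time, then the chip-ID read that follows in the same function (register names from the hunk; the write's return value is left unchecked, matching the patch):

regmap_write(data->regmap, BMC150_ACCEL_REG_RESET, BMC150_ACCEL_RESET_VAL);
usleep_range(1800, 2500);               /* datasheet: 1.8 ms after reset */

ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val);
if (ret < 0)
        return ret;
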
index 3a9f106..9d72d4b 100644 (file)
@@ -160,11 +160,13 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
                if (ret < 0)
                        goto error_ret;
                *val = ret;
+               ret = IIO_VAL_INT;
                break;
        case IIO_CHAN_INFO_SCALE:
                ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
                if (ret < 0)
                        goto error_ret;
+               *val = 0;
                *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
                ret = IIO_VAL_INT_PLUS_MICRO;
                break;
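
Both one-liners restore the read_raw() contract: a raw read returns IIO_VAL_INT with the value in *val, while a purely fractional scale returns IIO_VAL_INT_PLUS_MICRO with *val zeroed and the micro part in *val2. A minimal skeleton of that convention (the foo_* names and the 11972 value are illustrative only):

static int foo_read_raw(struct iio_dev *indio_dev,
                        struct iio_chan_spec const *chan,
                        int *val, int *val2, long mask)
{
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
                *val = foo_read_sample(chan->channel);  /* hypothetical helper */
                return IIO_VAL_INT;
        case IIO_CHAN_INFO_SCALE:
                *val = 0;
                *val2 = 11972;          /* reported to userspace as 0.011972 */
                return IIO_VAL_INT_PLUS_MICRO;
        default:
                return -EINVAL;
        }
}
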
index 1de31bd..7675772 100644 (file)
@@ -389,6 +389,7 @@ config QCOM_SPMI_VADC
 config ROCKCHIP_SARADC
        tristate "Rockchip SARADC driver"
        depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
+       depends on RESET_CONTROLLER
        help
          Say yes here to build support for the SARADC found in SoCs from
          Rockchip.
index b616376..9704090 100644 (file)
@@ -527,6 +527,7 @@ static struct attribute_group ad799x_event_attrs_group = {
 static const struct iio_info ad7991_info = {
        .read_raw = &ad799x_read_raw,
        .driver_module = THIS_MODULE,
+       .update_scan_mode = ad799x_update_scan_mode,
 };
 
 static const struct iio_info ad7993_4_7_8_noirq_info = {
index 52430ba..0438c68 100644 (file)
@@ -381,8 +381,8 @@ static irqreturn_t at91_adc_rl_interrupt(int irq, void *private)
                st->ts_bufferedmeasure = false;
                input_report_key(st->ts_input, BTN_TOUCH, 0);
                input_sync(st->ts_input);
-       } else if (status & AT91_ADC_EOC(3)) {
-               /* Conversion finished */
+       } else if (status & AT91_ADC_EOC(3) && st->ts_input) {
+               /* Conversion finished and we have a touchscreen */
                if (st->ts_bufferedmeasure) {
                        /*
                         * Last measurement is always discarded, since it can
index f9ad6c2..85d7012 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/of_device.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
 #include <linux/regulator/consumer.h>
 #include <linux/iio/iio.h>
 
@@ -53,6 +55,7 @@ struct rockchip_saradc {
        struct clk              *clk;
        struct completion       completion;
        struct regulator        *vref;
+       struct reset_control    *reset;
        const struct rockchip_saradc_data *data;
        u16                     last_val;
 };
@@ -190,6 +193,16 @@ static const struct of_device_id rockchip_saradc_match[] = {
 };
 MODULE_DEVICE_TABLE(of, rockchip_saradc_match);
 
+/**
+ * Reset SARADC Controller.
+ */
+static void rockchip_saradc_reset_controller(struct reset_control *reset)
+{
+       reset_control_assert(reset);
+       usleep_range(10, 20);
+       reset_control_deassert(reset);
+}
+
 static int rockchip_saradc_probe(struct platform_device *pdev)
 {
        struct rockchip_saradc *info = NULL;
@@ -218,6 +231,20 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
        if (IS_ERR(info->regs))
                return PTR_ERR(info->regs);
 
+       /*
+        * The reset should be an optional property, as it should work
+        * with old devicetrees as well
+        */
+       info->reset = devm_reset_control_get(&pdev->dev, "saradc-apb");
+       if (IS_ERR(info->reset)) {
+               ret = PTR_ERR(info->reset);
+               if (ret != -ENOENT)
+                       return ret;
+
+               dev_dbg(&pdev->dev, "no reset control found\n");
+               info->reset = NULL;
+       }
+
        init_completion(&info->completion);
 
        irq = platform_get_irq(pdev, 0);
@@ -252,6 +279,9 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
                return PTR_ERR(info->vref);
        }
 
+       if (info->reset)
+               rockchip_saradc_reset_controller(info->reset);
+
        /*
         * Use a default value for the converter clock.
         * This may become user-configurable in the future.
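
Condensing the two hunks above: the reset line is optional so device trees written before this binding keep probing, -ENOENT is the only error that is swallowed (anything else, including -EPROBE_DEFER, is propagated), and the assert/delay/deassert helper only runs when a reset control was actually found. The logic in one place, using the names from the hunks:

info->reset = devm_reset_control_get(&pdev->dev, "saradc-apb");
if (IS_ERR(info->reset)) {
        ret = PTR_ERR(info->reset);
        if (ret != -ENOENT)
                return ret;             /* real failure, e.g. -EPROBE_DEFER */
        info->reset = NULL;             /* old DT without the reset property */
}

if (info->reset) {
        reset_control_assert(info->reset);
        usleep_range(10, 20);
        reset_control_deassert(info->reset);
}
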
index 1ef3987..066abaf 100644 (file)
@@ -489,7 +489,8 @@ static struct iio_info ads1115_info = {
 #ifdef CONFIG_OF
 static int ads1015_get_channels_config_of(struct i2c_client *client)
 {
-       struct ads1015_data *data = i2c_get_clientdata(client);
+       struct iio_dev *indio_dev = i2c_get_clientdata(client);
+       struct ads1015_data *data = iio_priv(indio_dev);
        struct device_node *node;
 
        if (!client->dev.of_node ||
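
The bug fixed here is a common IIO pitfall: the client data presumably holds the iio_dev (set in probe), so readers must go through iio_priv() to reach the driver's private struct instead of casting the client data directly. The intended pairing in outline, with the names from the hunk:

/* probe side: the private struct lives inside the iio_dev allocation */
i2c_set_clientdata(client, indio_dev);

/* any later callback: recover it via iio_priv(), never by casting */
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ads1015_data *data = iio_priv(indio_dev);
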
index 8a36875..c3cfacc 100644 (file)
@@ -32,6 +32,7 @@
 
 struct tiadc_device {
        struct ti_tscadc_dev *mfd_tscadc;
+       struct mutex fifo1_lock; /* to protect fifo access */
        int channels;
        u8 channel_line[8];
        u8 channel_step[8];
@@ -359,6 +360,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
                int *val, int *val2, long mask)
 {
        struct tiadc_device *adc_dev = iio_priv(indio_dev);
+       int ret = IIO_VAL_INT;
        int i, map_val;
        unsigned int fifo1count, read, stepid;
        bool found = false;
@@ -372,13 +374,14 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
        if (!step_en)
                return -EINVAL;
 
+       mutex_lock(&adc_dev->fifo1_lock);
        fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
        while (fifo1count--)
                tiadc_readl(adc_dev, REG_FIFO1);
 
        am335x_tsc_se_set_once(adc_dev->mfd_tscadc, step_en);
 
-       timeout = jiffies + usecs_to_jiffies
+       timeout = jiffies + msecs_to_jiffies
                                (IDLE_TIMEOUT * adc_dev->channels);
        /* Wait for Fifo threshold interrupt */
        while (1) {
@@ -388,7 +391,8 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
 
                if (time_after(jiffies, timeout)) {
                        am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
-                       return -EAGAIN;
+                       ret = -EAGAIN;
+                       goto err_unlock;
                }
        }
        map_val = adc_dev->channel_step[chan->scan_index];
@@ -414,8 +418,11 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
        am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
 
        if (found == false)
-               return -EBUSY;
-       return IIO_VAL_INT;
+               ret = -EBUSY;
+               ret = -EBUSY;
+
+err_unlock:
+       mutex_unlock(&adc_dev->fifo1_lock);
+       return ret;
 }
 
 static const struct iio_info tiadc_info = {
@@ -483,6 +490,7 @@ static int tiadc_probe(struct platform_device *pdev)
 
        tiadc_step_config(indio_dev);
        tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD);
+       mutex_init(&adc_dev->fifo1_lock);
 
        err = tiadc_channel_init(indio_dev, adc_dev->channels);
        if (err < 0)
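
The conversion above replaces early returns with a single unlock point so the new fifo1_lock is always released, and the lock is initialised in probe before the first reader can race with it. Bare skeleton of that shape (the foo_* names are placeholders, not from the patch):

static int foo_read_locked(struct foo_dev *d, int *val)
{
        int ret = 0;

        mutex_lock(&d->fifo_lock);

        if (!foo_fifo_ready(d)) {       /* hypothetical readiness check */
                ret = -EAGAIN;
                goto out_unlock;
        }
        *val = foo_fifo_pop(d);         /* hypothetical FIFO read */

out_unlock:
        mutex_unlock(&d->fifo_lock);
        return ret;
}
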
index ae038a5..407f141 100644 (file)
@@ -434,7 +434,7 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
                        break;
                case IIO_ELECTRICALCONDUCTIVITY:
                        *val = 1; /* 0.00001 */
-                       *val = 100000;
+                       *val2 = 100000;
                        break;
                case IIO_CONCENTRATION:
                        *val = 0; /* 0.000000001 */
index e81f434..dc33c1d 100644 (file)
@@ -56,8 +56,8 @@ static struct {
        {HID_USAGE_SENSOR_ALS, 0, 1, 0},
        {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0},
 
-       {HID_USAGE_SENSOR_PRESSURE, 0, 100000, 0},
-       {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 1, 0},
+       {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0},
+       {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000},
 };
 
 static int pow_10(unsigned power)
index 792a971..bebbd00 100644 (file)
@@ -65,6 +65,16 @@ struct stx104_gpio {
        unsigned int out_state;
 };
 
+/**
+ * struct stx104_dev - STX104 device private data structure
+ * @indio_dev: IIO device
+ * @chip:      instance of the gpio_chip
+ */
+struct stx104_dev {
+       struct iio_dev *indio_dev;
+       struct gpio_chip *chip;
+};
+
 static int stx104_read_raw(struct iio_dev *indio_dev,
        struct iio_chan_spec const *chan, int *val, int *val2, long mask)
 {
@@ -107,6 +117,7 @@ static const struct iio_chan_spec stx104_channels[STX104_NUM_CHAN] = {
 static int stx104_gpio_get_direction(struct gpio_chip *chip,
        unsigned int offset)
 {
+       /* GPIO 0-3 are input only, while the rest are output only */
        if (offset < 4)
                return 1;
 
@@ -169,6 +180,7 @@ static int stx104_probe(struct device *dev, unsigned int id)
        struct iio_dev *indio_dev;
        struct stx104_iio *priv;
        struct stx104_gpio *stx104gpio;
+       struct stx104_dev *stx104dev;
        int err;
 
        indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
@@ -179,6 +191,10 @@ static int stx104_probe(struct device *dev, unsigned int id)
        if (!stx104gpio)
                return -ENOMEM;
 
+       stx104dev = devm_kzalloc(dev, sizeof(*stx104dev), GFP_KERNEL);
+       if (!stx104dev)
+               return -ENOMEM;
+
        if (!devm_request_region(dev, base[id], STX104_EXTENT,
                dev_name(dev))) {
                dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
@@ -199,12 +215,6 @@ static int stx104_probe(struct device *dev, unsigned int id)
        outw(0, base[id] + 4);
        outw(0, base[id] + 6);
 
-       err = devm_iio_device_register(dev, indio_dev);
-       if (err) {
-               dev_err(dev, "IIO device registering failed (%d)\n", err);
-               return err;
-       }
-
        stx104gpio->chip.label = dev_name(dev);
        stx104gpio->chip.parent = dev;
        stx104gpio->chip.owner = THIS_MODULE;
@@ -220,7 +230,9 @@ static int stx104_probe(struct device *dev, unsigned int id)
 
        spin_lock_init(&stx104gpio->lock);
 
-       dev_set_drvdata(dev, stx104gpio);
+       stx104dev->indio_dev = indio_dev;
+       stx104dev->chip = &stx104gpio->chip;
+       dev_set_drvdata(dev, stx104dev);
 
        err = gpiochip_add_data(&stx104gpio->chip, stx104gpio);
        if (err) {
@@ -228,14 +240,22 @@ static int stx104_probe(struct device *dev, unsigned int id)
                return err;
        }
 
+       err = iio_device_register(indio_dev);
+       if (err) {
+               dev_err(dev, "IIO device registering failed (%d)\n", err);
+               gpiochip_remove(&stx104gpio->chip);
+               return err;
+       }
+
        return 0;
 }
 
 static int stx104_remove(struct device *dev, unsigned int id)
 {
-       struct stx104_gpio *const stx104gpio = dev_get_drvdata(dev);
+       struct stx104_dev *const stx104dev = dev_get_drvdata(dev);
 
-       gpiochip_remove(&stx104gpio->chip);
+       iio_device_unregister(stx104dev->indio_dev);
+       gpiochip_remove(stx104dev->chip);
 
        return 0;
 }
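
Switching from devm_iio_device_register() to the plain call means probe must unwind the already-added gpiochip when IIO registration fails, and remove() must tear things down explicitly in reverse order of registration; the new wrapper struct in drvdata exists only so remove() can reach both objects. The ordering in outline, taken from the hunks above:

/* probe: GPIO chip first, IIO device last; unwind on failure */
err = gpiochip_add_data(&stx104gpio->chip, stx104gpio);
if (err)
        return err;

err = iio_device_register(indio_dev);
if (err) {
        gpiochip_remove(&stx104gpio->chip);
        return err;
}

/* remove: strictly the reverse order */
iio_device_unregister(stx104dev->indio_dev);
gpiochip_remove(stx104dev->chip);
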
index 738a86d..d041243 100644 (file)
@@ -6,6 +6,8 @@ menu "Humidity sensors"
 config AM2315
     tristate "Aosong AM2315 relative humidity and temperature sensor"
     depends on I2C
+    select IIO_BUFFER
+    select IIO_TRIGGERED_BUFFER
     help
       If you say yes here you get support for the Aosong AM2315
       relative humidity and ambient temperature sensor.
index 3e200f6..ff96b6d 100644 (file)
@@ -244,7 +244,7 @@ static int am2315_probe(struct i2c_client *client,
        indio_dev->channels = am2315_channels;
        indio_dev->num_channels = ARRAY_SIZE(am2315_channels);
 
-       ret = iio_triggered_buffer_setup(indio_dev, NULL,
+       ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
                                         am2315_trigger_handler, NULL);
        if (ret < 0) {
                dev_err(&client->dev, "iio triggered buffer setup failed\n");
index a03832a..e0c9c70 100644 (file)
@@ -142,7 +142,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
        struct i2c_client *client = data->client;
        int delay = data->adc_int_us[chan->address];
        int ret;
-       int val;
+       __be16 val;
 
        /* start measurement */
        ret = i2c_smbus_write_byte(client, chan->address);
@@ -154,26 +154,13 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
        /* wait for integration time to pass */
        usleep_range(delay, delay + 1000);
 
-       /*
-        * i2c_smbus_read_word_data cannot() be used here due to the command
-        * value not being understood and causes NAKs preventing any reading
-        * from being accessed.
-        */
-       ret = i2c_smbus_read_byte(client);
+       /* read measurement */
+       ret = i2c_master_recv(data->client, (char *)&val, sizeof(val));
        if (ret < 0) {
-               dev_err(&client->dev, "cannot read high byte measurement");
+               dev_err(&client->dev, "cannot read sensor data\n");
                return ret;
        }
-       val = ret << 8;
-
-       ret = i2c_smbus_read_byte(client);
-       if (ret < 0) {
-               dev_err(&client->dev, "cannot read low byte measurement");
-               return ret;
-       }
-       val |= ret;
-
-       return val;
+       return be16_to_cpu(val);
 }
 
 static int hdc100x_get_heater_status(struct hdc100x_data *data)
@@ -272,8 +259,8 @@ static int hdc100x_probe(struct i2c_client *client,
        struct iio_dev *indio_dev;
        struct hdc100x_data *data;
 
-       if (!i2c_check_functionality(client->adapter,
-                               I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE))
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA |
+                                    I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
                return -EOPNOTSUPP;
 
        indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
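
The measurement read switches from two SMBus byte transfers to a single raw I2C receive into a big-endian word, which is why the functionality check now also requires I2C_FUNC_I2C. The read in isolation (client is the driver's i2c_client):

__be16 raw;
int ret;

ret = i2c_master_recv(client, (char *)&raw, sizeof(raw));
if (ret < 0)
        return ret;

return be16_to_cpu(raw);        /* sensor transmits MSB first */
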
index 90462fc..158aaf4 100644 (file)
@@ -107,9 +107,10 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 {
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        size_t datum_size;
        size_t to_wait;
-       int ret;
+       int ret = 0;
 
        if (!indio_dev->info)
                return -ENODEV;
@@ -131,19 +132,29 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
        else
                to_wait = min_t(size_t, n / datum_size, rb->watermark);
 
+       add_wait_queue(&rb->pollq, &wait);
        do {
-               ret = wait_event_interruptible(rb->pollq,
-                     iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size));
-               if (ret)
-                       return ret;
+               if (!indio_dev->info) {
+                       ret = -ENODEV;
+                       break;
+               }
 
-               if (!indio_dev->info)
-                       return -ENODEV;
+               if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
+                       if (signal_pending(current)) {
+                               ret = -ERESTARTSYS;
+                               break;
+                       }
+
+                       wait_woken(&wait, TASK_INTERRUPTIBLE,
+                                  MAX_SCHEDULE_TIMEOUT);
+                       continue;
+               }
 
                ret = rb->access->read_first_n(rb, n, buf);
                if (ret == 0 && (filp->f_flags & O_NONBLOCK))
                        ret = -EAGAIN;
-        } while (ret == 0);
+       } while (ret == 0);
+       remove_wait_queue(&rb->pollq, &wait);
 
        return ret;
 }
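
The rewrite replaces wait_event_interruptible() with an explicit wait-queue loop built on wait_woken(): every wakeup re-checks device removal (!indio_dev->info) and pending signals before either consuming data or sleeping again, and the waiter is removed exactly once on the way out. The pattern stripped of driver specifics (wq is the owner's wait_queue_head_t; condition() and consume() are placeholders):

DEFINE_WAIT_FUNC(wait, woken_wake_function);
int ret = 0;

add_wait_queue(&wq, &wait);
do {
        if (!condition()) {                     /* placeholder readiness test */
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
                continue;
        }
        ret = consume();                        /* placeholder: do the real work */
} while (ret == 0);
remove_wait_queue(&wq, &wait);
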
index f914d5d..d2b8899 100644 (file)
@@ -613,9 +613,8 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
                        return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
        case IIO_VAL_FRACTIONAL:
                tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
-               vals[1] = do_div(tmp, 1000000000LL);
-               vals[0] = tmp;
-               return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+               vals[0] = (int)div_s64_rem(tmp, 1000000000, &vals[1]);
+               return sprintf(buf, "%d.%09u\n", vals[0], abs(vals[1]));
        case IIO_VAL_FRACTIONAL_LOG2:
                tmp = (s64)vals[0] * 1000000000LL >> vals[1];
                vals[1] = do_div(tmp, 1000000000LL);
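
do_div() only handles unsigned operands, so negative IIO_VAL_FRACTIONAL values used to format as garbage; div_s64_rem() keeps the sign in the quotient and abs() on the remainder keeps the fractional digits printable. The arithmetic on its own (vals[] as in the function above):

s32 rem;
s64 scaled = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
int whole  = (int)div_s64_rem(scaled, 1000000000, &rem);

/* e.g. vals = {-3, 2} -> scaled = -1500000000 -> "-1.500000000" */
return sprintf(buf, "%d.%09u\n", whole, abs(rem));
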
index 7c566f5..3574945 100644 (file)
@@ -76,7 +76,6 @@ config BH1750
 config BH1780
        tristate "ROHM BH1780 ambient light sensor"
        depends on I2C
-       depends on !SENSORS_BH1780
        help
         Say Y here to build support for the ROHM BH1780GLI ambient
         light sensor.
@@ -238,6 +237,8 @@ config MAX44000
        tristate "MAX44000 Ambient and Infrared Proximity Sensor"
        depends on I2C
        select REGMAP_I2C
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
        help
         Say Y here if you want to build support for Maxim Integrated's
         MAX44000 ambient and infrared proximity sensor device.
index 6943688..e5a533c 100644 (file)
@@ -970,7 +970,7 @@ int bmp280_common_probe(struct device *dev,
        data->vdda = devm_regulator_get(dev, "vdda");
        if (IS_ERR(data->vdda)) {
                dev_err(dev, "failed to get VDDA regulator\n");
-               ret = PTR_ERR(data->vddd);
+               ret = PTR_ERR(data->vdda);
                goto out_disable_vddd;
        }
        ret = regulator_enable(data->vdda);
@@ -1079,7 +1079,8 @@ EXPORT_SYMBOL(bmp280_common_remove);
 #ifdef CONFIG_PM
 static int bmp280_runtime_suspend(struct device *dev)
 {
-       struct bmp280_data *data = dev_get_drvdata(dev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
+       struct bmp280_data *data = iio_priv(indio_dev);
        int ret;
 
        ret = regulator_disable(data->vdda);
@@ -1090,7 +1091,8 @@ static int bmp280_runtime_suspend(struct device *dev)
 
 static int bmp280_runtime_resume(struct device *dev)
 {
-       struct bmp280_data *data = dev_get_drvdata(dev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
+       struct bmp280_data *data = iio_priv(indio_dev);
        int ret;
 
        ret = regulator_enable(data->vddd);
index 2e3a70e..5656deb 100644 (file)
@@ -397,7 +397,7 @@ static int as3935_probe(struct spi_device *spi)
                return ret;
        }
 
-       ret = iio_triggered_buffer_setup(indio_dev, NULL,
+       ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
                &as3935_trigger_handler, NULL);
 
        if (ret) {
index 3a3c5d7..51c79b2 100644 (file)
@@ -106,7 +106,6 @@ struct mcast_group {
        atomic_t                refcount;
        enum mcast_group_state  state;
        struct ib_sa_query      *query;
-       int                     query_id;
        u16                     pkey_index;
        u8                      leave_state;
        int                     retries;
@@ -340,11 +339,7 @@ static int send_join(struct mcast_group *group, struct mcast_member *member)
                                       member->multicast.comp_mask,
                                       3000, GFP_KERNEL, join_handler, group,
                                       &group->query);
-       if (ret >= 0) {
-               group->query_id = ret;
-               ret = 0;
-       }
-       return ret;
+       return (ret > 0) ? 0 : ret;
 }
 
 static int send_leave(struct mcast_group *group, u8 leave_state)
@@ -364,11 +359,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
                                       IB_SA_MCMEMBER_REC_JOIN_STATE,
                                       3000, GFP_KERNEL, leave_handler,
                                       group, &group->query);
-       if (ret >= 0) {
-               group->query_id = ret;
-               ret = 0;
-       }
-       return ret;
+       return (ret > 0) ? 0 : ret;
 }
 
 static void join_group(struct mcast_group *group, struct mcast_member *member,
index 23f38cf..afe8b28 100644 (file)
@@ -1,6 +1,7 @@
 config INFINIBAND_CXGB4
        tristate "Chelsio T4/T5 RDMA Driver"
        depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
+       select CHELSIO_LIB
        select GENERIC_ALLOCATOR
        ---help---
          This is an iWARP/RDMA driver for the Chelsio T4 and T5
index e11cf72..fa40b68 100644 (file)
@@ -1,4 +1,5 @@
 ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb
 
 obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
 
index b6a953a..3cbbfbe 100644 (file)
@@ -49,6 +49,7 @@
 
 #include <rdma/ib_addr.h>
 
+#include <libcxgb_cm.h>
 #include "iw_cxgb4.h"
 #include "clip_tbl.h"
 
@@ -239,15 +240,13 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
 
 static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
 {
-       struct cpl_tid_release *req;
+       u32 len = roundup(sizeof(struct cpl_tid_release), 16);
 
-       skb = get_skb(skb, sizeof *req, GFP_KERNEL);
+       skb = get_skb(skb, len, GFP_KERNEL);
        if (!skb)
                return;
-       req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
-       INIT_TP_WR(req, hwtid);
-       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
-       set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+
+       cxgb_mk_tid_release(skb, len, hwtid, 0);
        c4iw_ofld_send(rdev, skb);
        return;
 }
@@ -464,72 +463,6 @@ static struct net_device *get_real_dev(struct net_device *egress_dev)
        return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
 }
 
-static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
-{
-       int i;
-
-       egress_dev = get_real_dev(egress_dev);
-       for (i = 0; i < dev->rdev.lldi.nports; i++)
-               if (dev->rdev.lldi.ports[i] == egress_dev)
-                       return 1;
-       return 0;
-}
-
-static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
-                                    __u8 *peer_ip, __be16 local_port,
-                                    __be16 peer_port, u8 tos,
-                                    __u32 sin6_scope_id)
-{
-       struct dst_entry *dst = NULL;
-
-       if (IS_ENABLED(CONFIG_IPV6)) {
-               struct flowi6 fl6;
-
-               memset(&fl6, 0, sizeof(fl6));
-               memcpy(&fl6.daddr, peer_ip, 16);
-               memcpy(&fl6.saddr, local_ip, 16);
-               if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
-                       fl6.flowi6_oif = sin6_scope_id;
-               dst = ip6_route_output(&init_net, NULL, &fl6);
-               if (!dst)
-                       goto out;
-               if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
-                   !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
-                       dst_release(dst);
-                       dst = NULL;
-               }
-       }
-
-out:
-       return dst;
-}
-
-static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
-                                __be32 peer_ip, __be16 local_port,
-                                __be16 peer_port, u8 tos)
-{
-       struct rtable *rt;
-       struct flowi4 fl4;
-       struct neighbour *n;
-
-       rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
-                                  peer_port, local_port, IPPROTO_TCP,
-                                  tos, 0);
-       if (IS_ERR(rt))
-               return NULL;
-       n = dst_neigh_lookup(&rt->dst, &peer_ip);
-       if (!n)
-               return NULL;
-       if (!our_interface(dev, n->dev) &&
-           !(n->dev->flags & IFF_LOOPBACK)) {
-               neigh_release(n);
-               dst_release(&rt->dst);
-               return NULL;
-       }
-       neigh_release(n);
-       return &rt->dst;
-}
-
 static void arp_failure_discard(void *handle, struct sk_buff *skb)
 {
        pr_err(MOD "ARP failure\n");
@@ -704,56 +637,32 @@ static int send_flowc(struct c4iw_ep *ep)
 
 static int send_halfclose(struct c4iw_ep *ep)
 {
-       struct cpl_close_con_req *req;
        struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
-       int wrlen = roundup(sizeof *req, 16);
+       u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);
 
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        if (WARN_ON(!skb))
                return -ENOMEM;
 
-       set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
-       t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
-       req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
-       memset(req, 0, wrlen);
-       INIT_TP_WR(req, ep->hwtid);
-       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
-                                                   ep->hwtid));
+       cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
+                             NULL, arp_failure_discard);
+
        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
 static int send_abort(struct c4iw_ep *ep)
 {
-       struct cpl_abort_req *req;
-       int wrlen = roundup(sizeof *req, 16);
+       u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
        struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
 
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        if (WARN_ON(!req_skb))
                return -ENOMEM;
 
-       set_wr_txq(req_skb, CPL_PRIORITY_DATA, ep->txq_idx);
-       t4_set_arp_err_handler(req_skb, ep, abort_arp_failure);
-       req = (struct cpl_abort_req *)skb_put(req_skb, wrlen);
-       memset(req, 0, wrlen);
-       INIT_TP_WR(req, ep->hwtid);
-       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
-       req->cmd = CPL_ABORT_SEND_RST;
-       return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
-}
+       cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx,
+                         ep, abort_arp_failure);
 
-static void best_mtu(const unsigned short *mtus, unsigned short mtu,
-                    unsigned int *idx, int use_ts, int ipv6)
-{
-       unsigned short hdr_size = (ipv6 ?
-                                  sizeof(struct ipv6hdr) :
-                                  sizeof(struct iphdr)) +
-                                 sizeof(struct tcphdr) +
-                                 (use_ts ?
-                                  round_up(TCPOLEN_TIMESTAMP, 4) : 0);
-       unsigned short data_size = mtu - hdr_size;
-
-       cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
+       return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
 }
 
 static int send_connect(struct c4iw_ep *ep)
@@ -768,7 +677,7 @@ static int send_connect(struct c4iw_ep *ep)
        u64 opt0;
        u32 opt2;
        unsigned int mtu_idx;
-       int wscale;
+       u32 wscale;
        int win, sizev4, sizev6, wrlen;
        struct sockaddr_in *la = (struct sockaddr_in *)
                                 &ep->com.local_addr;
@@ -815,10 +724,10 @@ static int send_connect(struct c4iw_ep *ep)
        }
        set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
 
-       best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
-                enable_tcp_timestamps,
-                (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
-       wscale = compute_wscale(rcv_win);
+       cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+                     enable_tcp_timestamps,
+                     (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+       wscale = cxgb_compute_wscale(rcv_win);
 
        /*
         * Specify the largest window that will fit in opt0. The
@@ -1445,9 +1354,9 @@ static void established_upcall(struct c4iw_ep *ep)
 
 static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 {
-       struct cpl_rx_data_ack *req;
        struct sk_buff *skb;
-       int wrlen = roundup(sizeof *req, 16);
+       u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
+       u32 credit_dack;
 
        PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
        skb = get_skb(NULL, wrlen, GFP_KERNEL);
@@ -1464,15 +1373,12 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
        if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
                credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
 
-       req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
-       memset(req, 0, wrlen);
-       INIT_TP_WR(req, ep->hwtid);
-       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
-                                                   ep->hwtid));
-       req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
-                                      RX_DACK_CHANGE_F |
-                                      RX_DACK_MODE_V(dack_mode));
-       set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
+       credit_dack = credits | RX_FORCE_ACK_F | RX_DACK_CHANGE_F |
+                     RX_DACK_MODE_V(dack_mode);
+
+       cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx,
+                           credit_dack);
+
        c4iw_ofld_send(&ep->com.dev->rdev, skb);
        return credits;
 }
@@ -1970,7 +1876,7 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
        struct sk_buff *skb;
        struct fw_ofld_connection_wr *req;
        unsigned int mtu_idx;
-       int wscale;
+       u32 wscale;
        struct sockaddr_in *sin;
        int win;
 
@@ -1995,10 +1901,10 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
                        htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
        req->tcb.tx_max = (__force __be32) jiffies;
        req->tcb.rcv_adv = htons(1);
-       best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
-                enable_tcp_timestamps,
-                (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
-       wscale = compute_wscale(rcv_win);
+       cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+                     enable_tcp_timestamps,
+                     (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+       wscale = cxgb_compute_wscale(rcv_win);
 
        /*
         * Specify the largest window that will fit in opt0. The
@@ -2052,15 +1958,6 @@ static inline int act_open_has_tid(int status)
                status != CPL_ERR_CONN_EXIST);
 }
 
-/* Returns whether a CPL status conveys negative advice.
- */
-static int is_neg_adv(unsigned int status)
-{
-       return status == CPL_ERR_RTX_NEG_ADVICE ||
-              status == CPL_ERR_PERSIST_NEG_ADVICE ||
-              status == CPL_ERR_KEEPALV_NEG_ADVICE;
-}
-
 static char *neg_adv_str(unsigned int status)
 {
        switch (status) {
@@ -2214,16 +2111,21 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 
        /* find a route */
        if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
-               ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
-                                    raddr->sin_addr.s_addr, laddr->sin_port,
-                                    raddr->sin_port, ep->com.cm_id->tos);
+               ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev,
+                                         laddr->sin_addr.s_addr,
+                                         raddr->sin_addr.s_addr,
+                                         laddr->sin_port,
+                                         raddr->sin_port, ep->com.cm_id->tos);
                iptype = 4;
                ra = (__u8 *)&raddr->sin_addr;
        } else {
-               ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
-                                     raddr6->sin6_addr.s6_addr,
-                                     laddr6->sin6_port, raddr6->sin6_port, 0,
-                                     raddr6->sin6_scope_id);
+               ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi,
+                                          get_real_dev,
+                                          laddr6->sin6_addr.s6_addr,
+                                          raddr6->sin6_addr.s6_addr,
+                                          laddr6->sin6_port,
+                                          raddr6->sin6_port, 0,
+                                          raddr6->sin6_scope_id);
                iptype = 6;
                ra = (__u8 *)&raddr6->sin6_addr;
        }
@@ -2295,7 +2197,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
        PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
             status, status2errno(status));
 
-       if (is_neg_adv(status)) {
+       if (cxgb_is_neg_adv(status)) {
                PDBG("%s Connection problems for atid %u status %u (%s)\n",
                     __func__, atid, status, neg_adv_str(status));
                ep->stats.connect_neg_adv++;
@@ -2422,7 +2324,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
        unsigned int mtu_idx;
        u64 opt0;
        u32 opt2;
-       int wscale;
+       u32 wscale;
        struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
        int win;
        enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
@@ -2443,10 +2345,10 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
                                                    ep->hwtid));
 
-       best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
-                enable_tcp_timestamps && req->tcpopt.tstamp,
-                (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
-       wscale = compute_wscale(rcv_win);
+       cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+                     enable_tcp_timestamps && req->tcpopt.tstamp,
+                     (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+       wscale = cxgb_compute_wscale(rcv_win);
 
        /*
         * Specify the largest window that will fit in opt0. The
@@ -2518,42 +2420,6 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
        return;
 }
 
-static void get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type,
-                      int *iptype, __u8 *local_ip, __u8 *peer_ip,
-                      __be16 *local_port, __be16 *peer_port)
-{
-       int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
-                     ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
-                     T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
-       int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
-                    IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
-                    T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
-       struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
-       struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
-       struct tcphdr *tcp = (struct tcphdr *)
-                            ((u8 *)(req + 1) + eth_len + ip_len);
-
-       if (ip->version == 4) {
-               PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
-                    ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
-                    ntohs(tcp->dest));
-               *iptype = 4;
-               memcpy(peer_ip, &ip->saddr, 4);
-               memcpy(local_ip, &ip->daddr, 4);
-       } else {
-               PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
-                    ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
-                    ntohs(tcp->dest));
-               *iptype = 6;
-               memcpy(peer_ip, ip6->saddr.s6_addr, 16);
-               memcpy(local_ip, ip6->daddr.s6_addr, 16);
-       }
-       *peer_port = tcp->source;
-       *local_port = tcp->dest;
-
-       return;
-}
-
 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 {
        struct c4iw_ep *child_ep = NULL, *parent_ep;
@@ -2582,8 +2448,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                goto reject;
        }
 
-       get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, &iptype,
-                  local_ip, peer_ip, &local_port, &peer_port);
+       cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type,
+                       &iptype, local_ip, peer_ip, &local_port, &peer_port);
 
        /* Find output route */
        if (iptype == 4)  {
@@ -2591,18 +2457,19 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                     , __func__, parent_ep, hwtid,
                     local_ip, peer_ip, ntohs(local_port),
                     ntohs(peer_port), peer_mss);
-               dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
-                                local_port, peer_port,
-                                tos);
+               dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
+                                     *(__be32 *)local_ip, *(__be32 *)peer_ip,
+                                     local_port, peer_port, tos);
        } else {
                PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
                     , __func__, parent_ep, hwtid,
                     local_ip, peer_ip, ntohs(local_port),
                     ntohs(peer_port), peer_mss);
-               dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
-                                 PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
-                                 ((struct sockaddr_in6 *)
-                                 &parent_ep->com.local_addr)->sin6_scope_id);
+               dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
+                               local_ip, peer_ip, local_port, peer_port,
+                               PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
+                               ((struct sockaddr_in6 *)
+                                &parent_ep->com.local_addr)->sin6_scope_id);
        }
        if (!dst) {
                printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
@@ -2835,18 +2702,18 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 {
        struct cpl_abort_req_rss *req = cplhdr(skb);
        struct c4iw_ep *ep;
-       struct cpl_abort_rpl *rpl;
        struct sk_buff *rpl_skb;
        struct c4iw_qp_attributes attrs;
        int ret;
        int release = 0;
        unsigned int tid = GET_TID(req);
+       u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
 
        ep = get_ep_from_tid(dev, tid);
        if (!ep)
                return 0;
 
-       if (is_neg_adv(req->status)) {
+       if (cxgb_is_neg_adv(req->status)) {
                PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
                     __func__, ep->hwtid, req->status,
                     neg_adv_str(req->status));
@@ -2939,11 +2806,9 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
                release = 1;
                goto out;
        }
-       set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
-       rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
-       INIT_TP_WR(rpl, ep->hwtid);
-       OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
-       rpl->cmd = CPL_ABORT_NO_RST;
+
+       cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx);
+
        c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
 out:
        if (release)
@@ -3375,9 +3240,11 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
                     __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
                     ra, ntohs(raddr->sin_port));
-               ep->dst = find_route(dev, laddr->sin_addr.s_addr,
-                                    raddr->sin_addr.s_addr, laddr->sin_port,
-                                    raddr->sin_port, cm_id->tos);
+               ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
+                                         laddr->sin_addr.s_addr,
+                                         raddr->sin_addr.s_addr,
+                                         laddr->sin_port,
+                                         raddr->sin_port, cm_id->tos);
        } else {
                iptype = 6;
                ra = (__u8 *)&raddr6->sin6_addr;
@@ -3396,10 +3263,12 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                     __func__, laddr6->sin6_addr.s6_addr,
                     ntohs(laddr6->sin6_port),
                     raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
-               ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
-                                     raddr6->sin6_addr.s6_addr,
-                                     laddr6->sin6_port, raddr6->sin6_port, 0,
-                                     raddr6->sin6_scope_id);
+               ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
+                                          laddr6->sin6_addr.s6_addr,
+                                          raddr6->sin6_addr.s6_addr,
+                                          laddr6->sin6_port,
+                                          raddr6->sin6_port, 0,
+                                          raddr6->sin6_scope_id);
        }
        if (!ep->dst) {
                printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
@@ -4041,8 +3910,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
             ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
             ntohs(tcph->source), iph->tos);
 
-       dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
-                        iph->tos);
+       dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
+                             iph->daddr, iph->saddr, tcph->dest,
+                             tcph->source, iph->tos);
        if (!dst) {
                pr_err("%s - failed to find dst entry!\n",
                       __func__);
@@ -4317,7 +4187,7 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
                kfree_skb(skb);
                return 0;
        }
-       if (is_neg_adv(req->status)) {
+       if (cxgb_is_neg_adv(req->status)) {
                PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
                     __func__, ep->hwtid, req->status,
                     neg_adv_str(req->status));
index aa47e0a..6a9bef1 100644 (file)
@@ -881,15 +881,6 @@ static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
        return cm_id->provider_data;
 }
 
-static inline int compute_wscale(int win)
-{
-       int wscale = 0;
-
-       while (wscale < 14 && (65535<<wscale) < win)
-               wscale++;
-       return wscale;
-}
-
 static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
 {
 #if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
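
compute_wscale() and the other connection-management helpers deleted in this series move into the shared libcxgb module (hence the CHELSIO_LIB select and the extra -Idrivers/net/ethernet/chelsio/libcxgb above), so iw_cxgb4 and the NIC drivers stop carrying private copies. Presumably the shared cxgb_compute_wscale() keeps the logic of the inline helper removed here; a sketch for reference, not the library's verbatim source:

/* smallest TCP window-scale shift that covers the requested window */
static u32 cxgb_compute_wscale(u32 win)
{
        u32 wscale = 0;

        while (wscale < 14 && (65535 << wscale) < win)
                wscale++;
        return wscale;
}
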
index edb1172..6904352 100644 (file)
@@ -683,7 +683,7 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
        return 0;
 }
 
-void _free_qp(struct kref *kref)
+static void _free_qp(struct kref *kref)
 {
        struct c4iw_qp *qhp;
 
index b32638d..cc38004 100644 (file)
@@ -9490,6 +9490,78 @@ static void init_lcb(struct hfi1_devdata *dd)
        write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
 }
 
+/*
+ * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
+ * on error.
+ */
+static int test_qsfp_read(struct hfi1_pportdata *ppd)
+{
+       int ret;
+       u8 status;
+
+       /* report success if not a QSFP */
+       if (ppd->port_type != PORT_TYPE_QSFP)
+               return 0;
+
+       /* read byte 2, the status byte */
+       ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
+       if (ret < 0)
+               return ret;
+       if (ret != 1)
+               return -EIO;
+
+       return 0; /* success */
+}
+
+/*
+ * Values for QSFP retry.
+ *
+ * Give up after 10s (20 x 500ms).  The overall timeout was empirically
+ * arrived at from experience on a large cluster.
+ */
+#define MAX_QSFP_RETRIES 20
+#define QSFP_RETRY_WAIT 500 /* msec */
+
+/*
+ * Try a QSFP read.  If it fails, schedule a retry for later.
+ * Called on first link activation after driver load.
+ */
+static void try_start_link(struct hfi1_pportdata *ppd)
+{
+       if (test_qsfp_read(ppd)) {
+               /* read failed */
+               if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
+                       dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
+                       return;
+               }
+               dd_dev_info(ppd->dd,
+                           "QSFP not responding, waiting and retrying %d\n",
+                           (int)ppd->qsfp_retry_count);
+               ppd->qsfp_retry_count++;
+               queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
+                                  msecs_to_jiffies(QSFP_RETRY_WAIT));
+               return;
+       }
+       ppd->qsfp_retry_count = 0;
+
+       /*
+        * Tune the SerDes to a ballpark setting for optimal signal and bit
+        * error rate.  Needs to be done before starting the link.
+        */
+       tune_serdes(ppd);
+       start_link(ppd);
+}
+
+/*
+ * Workqueue function to start the link after a delay.
+ */
+void handle_start_link(struct work_struct *work)
+{
+       struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+                                                 start_link_work.work);
+       try_start_link(ppd);
+}
+
 int bringup_serdes(struct hfi1_pportdata *ppd)
 {
        struct hfi1_devdata *dd = ppd->dd;
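
try_start_link() turns a flaky QSFP into a bounded retry loop by re-queueing itself as delayed work instead of blocking link bringup, and hfi1_quiet_serdes() pins the counter at the maximum and flushes/cancels the work so no retry fires during shutdown. The retry shape in miniature (foo_* names are hypothetical; the 20 x 500 ms budget is the one from the hunk; struct foo is assumed to hold a delayed_work, a workqueue and a retry counter initialised elsewhere):

#define FOO_MAX_RETRIES 20
#define FOO_RETRY_MS    500

static void foo_try_start(struct foo *f)
{
        if (foo_hw_not_ready(f)) {              /* hypothetical probe, e.g. a test read */
                if (f->retry_count >= FOO_MAX_RETRIES)
                        return;                 /* give up after ~10 s */
                f->retry_count++;
                queue_delayed_work(f->wq, &f->start_work,
                                   msecs_to_jiffies(FOO_RETRY_MS));
                return;
        }
        f->retry_count = 0;
        foo_bring_up(f);                        /* hypothetical */
}

static void foo_start_work_fn(struct work_struct *work)
{
        struct foo *f = container_of(work, struct foo, start_work.work);

        foo_try_start(f);
}
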
@@ -9525,14 +9597,8 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
                set_qsfp_int_n(ppd, 1);
        }
 
-       /*
-        * Tune the SerDes to a ballpark setting for
-        * optimal signal and bit error rate
-        * Needs to be done before starting the link
-        */
-       tune_serdes(ppd);
-
-       return start_link(ppd);
+       try_start_link(ppd);
+       return 0;
 }
 
 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
@@ -9549,6 +9615,10 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
        ppd->driver_link_ready = 0;
        ppd->link_enabled = 0;
 
+       ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
+       flush_delayed_work(&ppd->start_link_work);
+       cancel_delayed_work_sync(&ppd->start_link_work);
+
        ppd->offline_disabled_reason =
                        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
        set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
@@ -12865,7 +12935,7 @@ fail:
  */
 static int set_up_context_variables(struct hfi1_devdata *dd)
 {
-       int num_kernel_contexts;
+       unsigned long num_kernel_contexts;
        int total_contexts;
        int ret;
        unsigned ngroups;
@@ -12894,9 +12964,9 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
         */
        if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
                dd_dev_err(dd,
-                          "Reducing # kernel rcv contexts to: %d, from %d\n",
+                          "Reducing # kernel rcv contexts to: %d, from %lu\n",
                           (int)(dd->chip_send_contexts - num_vls - 1),
-                          (int)num_kernel_contexts);
+                          num_kernel_contexts);
                num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
        }
        /*
index ed11107..e295737 100644 (file)
@@ -706,6 +706,7 @@ void handle_link_up(struct work_struct *work);
 void handle_link_down(struct work_struct *work);
 void handle_link_downgrade(struct work_struct *work);
 void handle_link_bounce(struct work_struct *work);
+void handle_start_link(struct work_struct *work);
 void handle_sma_message(struct work_struct *work);
 void reset_qsfp(struct hfi1_pportdata *ppd);
 void qsfp_event(struct work_struct *work);
index a49cc88..5e9be16 100644 (file)
 
 static struct dentry *hfi1_dbg_root;
 
+/* wrappers to enforce srcu in seq file */
+static ssize_t hfi1_seq_read(
+       struct file *file,
+       char __user *buf,
+       size_t size,
+       loff_t *ppos)
+{
+       struct dentry *d = file->f_path.dentry;
+       int srcu_idx;
+       ssize_t r;
+
+       r = debugfs_use_file_start(d, &srcu_idx);
+       if (likely(!r))
+               r = seq_read(file, buf, size, ppos);
+       debugfs_use_file_finish(srcu_idx);
+       return r;
+}
+
+static loff_t hfi1_seq_lseek(
+       struct file *file,
+       loff_t offset,
+       int whence)
+{
+       struct dentry *d = file->f_path.dentry;
+       int srcu_idx;
+       loff_t r;
+
+       r = debugfs_use_file_start(d, &srcu_idx);
+       if (likely(!r))
+               r = seq_lseek(file, offset, whence);
+       debugfs_use_file_finish(srcu_idx);
+       return r;
+}
+
 #define private2dd(file) (file_inode(file)->i_private)
 #define private2ppd(file) (file_inode(file)->i_private)
 
@@ -87,8 +121,8 @@ static int _##name##_open(struct inode *inode, struct file *s) \
 static const struct file_operations _##name##_file_ops = { \
        .owner   = THIS_MODULE, \
        .open    = _##name##_open, \
-       .read    = seq_read, \
-       .llseek  = seq_lseek, \
+       .read    = hfi1_seq_read, \
+       .llseek  = hfi1_seq_lseek, \
        .release = seq_release \
 }
 
@@ -105,11 +139,9 @@ do { \
        DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, S_IRUGO)
 
 static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
        struct hfi1_opcode_stats_perctx *opstats;
 
-       rcu_read_lock();
        if (*pos >= ARRAY_SIZE(opstats->stats))
                return NULL;
        return pos;
@@ -126,9 +158,7 @@ static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-       rcu_read_unlock();
 }
 
 static int _opcode_stats_seq_show(struct seq_file *s, void *v)
@@ -285,12 +315,10 @@ DEBUGFS_SEQ_FILE_OPEN(qp_stats)
 DEBUGFS_FILE_OPS(qp_stats);
 
 static void *_sdes_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
        struct hfi1_ibdev *ibd;
        struct hfi1_devdata *dd;
 
-       rcu_read_lock();
        ibd = (struct hfi1_ibdev *)s->private;
        dd = dd_from_dev(ibd);
        if (!dd->per_sdma || *pos >= dd->num_sdma)
@@ -310,9 +338,7 @@ static void *_sdes_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _sdes_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-       rcu_read_unlock();
 }
 
 static int _sdes_seq_show(struct seq_file *s, void *v)
@@ -339,11 +365,9 @@ static ssize_t dev_counters_read(struct file *file, char __user *buf,
        struct hfi1_devdata *dd;
        ssize_t rval;
 
-       rcu_read_lock();
        dd = private2dd(file);
        avail = hfi1_read_cntrs(dd, NULL, &counters);
        rval =  simple_read_from_buffer(buf, count, ppos, counters, avail);
-       rcu_read_unlock();
        return rval;
 }
 
@@ -356,11 +380,9 @@ static ssize_t dev_names_read(struct file *file, char __user *buf,
        struct hfi1_devdata *dd;
        ssize_t rval;
 
-       rcu_read_lock();
        dd = private2dd(file);
        avail = hfi1_read_cntrs(dd, &names, NULL);
        rval =  simple_read_from_buffer(buf, count, ppos, names, avail);
-       rcu_read_unlock();
        return rval;
 }
 
@@ -383,11 +405,9 @@ static ssize_t portnames_read(struct file *file, char __user *buf,
        struct hfi1_devdata *dd;
        ssize_t rval;
 
-       rcu_read_lock();
        dd = private2dd(file);
        avail = hfi1_read_portcntrs(dd->pport, &names, NULL);
        rval = simple_read_from_buffer(buf, count, ppos, names, avail);
-       rcu_read_unlock();
        return rval;
 }
 
@@ -400,11 +420,9 @@ static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf,
        struct hfi1_pportdata *ppd;
        ssize_t rval;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
        avail = hfi1_read_portcntrs(ppd, NULL, &counters);
        rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
-       rcu_read_unlock();
        return rval;
 }
 
@@ -434,16 +452,13 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf,
        int used;
        int i;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
        dd = ppd->dd;
        size = PAGE_SIZE;
        used = 0;
        tmp = kmalloc(size, GFP_KERNEL);
-       if (!tmp) {
-               rcu_read_unlock();
+       if (!tmp)
                return -ENOMEM;
-       }
 
        scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
        used += scnprintf(tmp + used, size - used,
@@ -470,7 +485,6 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf,
        used += scnprintf(tmp + used, size - used, "Write bits to clear\n");
 
        ret = simple_read_from_buffer(buf, count, ppos, tmp, used);
-       rcu_read_unlock();
        kfree(tmp);
        return ret;
 }
@@ -486,15 +500,12 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
        u64 scratch0;
        u64 clear;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
        dd = ppd->dd;
 
        buff = kmalloc(count + 1, GFP_KERNEL);
-       if (!buff) {
-               ret = -ENOMEM;
-               goto do_return;
-       }
+       if (!buff)
+               return -ENOMEM;
 
        ret = copy_from_user(buff, buf, count);
        if (ret > 0) {
@@ -527,8 +538,6 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
 
  do_free:
        kfree(buff);
- do_return:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -542,18 +551,14 @@ static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf,
        char *tmp;
        int ret;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
        tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!tmp) {
-               rcu_read_unlock();
+       if (!tmp)
                return -ENOMEM;
-       }
 
        ret = qsfp_dump(ppd, tmp, PAGE_SIZE);
        if (ret > 0)
                ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
-       rcu_read_unlock();
        kfree(tmp);
        return ret;
 }
@@ -569,7 +574,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
        int offset;
        int total_written;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
 
        /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
@@ -577,16 +581,12 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
        offset = *ppos & 0xffff;
 
        /* explicitly reject invalid address 0 to catch cp and cat */
-       if (i2c_addr == 0) {
-               ret = -EINVAL;
-               goto _return;
-       }
+       if (i2c_addr == 0)
+               return -EINVAL;
 
        buff = kmalloc(count, GFP_KERNEL);
-       if (!buff) {
-               ret = -ENOMEM;
-               goto _return;
-       }
+       if (!buff)
+               return -ENOMEM;
 
        ret = copy_from_user(buff, buf, count);
        if (ret > 0) {
@@ -606,8 +606,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
 
  _free:
        kfree(buff);
- _return:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -636,7 +634,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
        int offset;
        int total_read;
 
-       rcu_read_lock();
        ppd = private2ppd(file);
 
        /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
@@ -644,16 +641,12 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
        offset = *ppos & 0xffff;
 
        /* explicitly reject invalid address 0 to catch cp and cat */
-       if (i2c_addr == 0) {
-               ret = -EINVAL;
-               goto _return;
-       }
+       if (i2c_addr == 0)
+               return -EINVAL;
 
        buff = kmalloc(count, GFP_KERNEL);
-       if (!buff) {
-               ret = -ENOMEM;
-               goto _return;
-       }
+       if (!buff)
+               return -ENOMEM;
 
        total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count);
        if (total_read < 0) {
@@ -673,8 +666,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
 
  _free:
        kfree(buff);
- _return:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -701,26 +692,20 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
        int ret;
        int total_written;
 
-       rcu_read_lock();
-       if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
-               ret = -EINVAL;
-               goto _return;
-       }
+       if (*ppos + count > QSFP_PAGESIZE * 4) /* base page + page00-page03 */
+               return -EINVAL;
 
        ppd = private2ppd(file);
 
        buff = kmalloc(count, GFP_KERNEL);
-       if (!buff) {
-               ret = -ENOMEM;
-               goto _return;
-       }
+       if (!buff)
+               return -ENOMEM;
 
        ret = copy_from_user(buff, buf, count);
        if (ret > 0) {
                ret = -EFAULT;
                goto _free;
        }
-
        total_written = qsfp_write(ppd, target, *ppos, buff, count);
        if (total_written < 0) {
                ret = total_written;
@@ -733,8 +718,6 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
 
  _free:
        kfree(buff);
- _return:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -761,7 +744,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
        int ret;
        int total_read;
 
-       rcu_read_lock();
        if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
                ret = -EINVAL;
                goto _return;
@@ -794,7 +776,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
  _free:
        kfree(buff);
  _return:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -1010,7 +991,6 @@ void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd)
        debugfs_remove_recursive(ibd->hfi1_ibdev_dbg);
 out:
        ibd->hfi1_ibdev_dbg = NULL;
-       synchronize_rcu();
 }
 
 /*
@@ -1035,9 +1015,7 @@ static const char * const hfi1_statnames[] = {
 };
 
 static void *_driver_stats_names_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
-       rcu_read_lock();
        if (*pos >= ARRAY_SIZE(hfi1_statnames))
                return NULL;
        return pos;
@@ -1055,9 +1033,7 @@ static void *_driver_stats_names_seq_next(
 }
 
 static void _driver_stats_names_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-       rcu_read_unlock();
 }
 
 static int _driver_stats_names_seq_show(struct seq_file *s, void *v)
@@ -1073,9 +1049,7 @@ DEBUGFS_SEQ_FILE_OPEN(driver_stats_names)
 DEBUGFS_FILE_OPS(driver_stats_names);
 
 static void *_driver_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
 {
-       rcu_read_lock();
        if (*pos >= ARRAY_SIZE(hfi1_statnames))
                return NULL;
        return pos;
@@ -1090,9 +1064,7 @@ static void *_driver_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void _driver_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
 {
-       rcu_read_unlock();
 }
 
 static u64 hfi1_sps_ints(void)
index a021e66..325ec21 100644 (file)
@@ -605,6 +605,7 @@ struct hfi1_pportdata {
        struct work_struct freeze_work;
        struct work_struct link_downgrade_work;
        struct work_struct link_bounce_work;
+       struct delayed_work start_link_work;
        /* host link state variables */
        struct mutex hls_lock;
        u32 host_link_state;
@@ -659,6 +660,7 @@ struct hfi1_pportdata {
        u8 linkinit_reason;
        u8 local_tx_rate;       /* rate given to 8051 firmware */
        u8 last_pstate;         /* info only */
+       u8 qsfp_retry_count;
 
        /* placeholders for IB MAD packet settings */
        u8 overrun_threshold;
@@ -1804,7 +1806,7 @@ extern unsigned int hfi1_max_mtu;
 extern unsigned int hfi1_cu;
 extern unsigned int user_credit_return_threshold;
 extern int num_user_contexts;
-extern unsigned n_krcvqs;
+extern unsigned long n_krcvqs;
 extern uint krcvqs[];
 extern int krcvqsset;
 extern uint kdeth_qp;
index b793545..384b43d 100644 (file)
@@ -94,7 +94,7 @@ module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
 MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
 
 /* computed based on above array */
-unsigned n_krcvqs;
+unsigned long n_krcvqs;
 
 static unsigned hfi1_rcvarr_split = 25;
 module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
@@ -500,6 +500,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
        INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
        INIT_WORK(&ppd->sma_message_work, handle_sma_message);
        INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
+       INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
        INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
        INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
 
index 39e42c3..7ffc14f 100644 (file)
@@ -2604,7 +2604,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
        u8 lq, num_vls;
        u8 res_lli, res_ler;
        u64 port_mask;
-       unsigned long port_num;
+       u8 port_num;
        unsigned long vl;
        u32 vl_select_mask;
        int vfi;
@@ -2638,9 +2638,9 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
         */
        port_mask = be64_to_cpu(req->port_select_mask[3]);
        port_num = find_first_bit((unsigned long *)&port_mask,
-                                 sizeof(port_mask));
+                                 sizeof(port_mask) * 8);
 
-       if ((u8)port_num != port) {
+       if (port_num != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
                return reply((struct ib_mad_hdr *)pmp);
        }
@@ -2842,7 +2842,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
         */
        port_mask = be64_to_cpu(req->port_select_mask[3]);
        port_num = find_first_bit((unsigned long *)&port_mask,
-                                 sizeof(port_mask));
+                                 sizeof(port_mask) * 8);
 
        if (port_num != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3015,7 +3015,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
         */
        port_mask = be64_to_cpu(req->port_select_mask[3]);
        port_num = find_first_bit((unsigned long *)&port_mask,
-                                 sizeof(port_mask));
+                                 sizeof(port_mask) * 8);
 
        if (port_num != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3252,7 +3252,7 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
         */
        port_mask = be64_to_cpu(req->port_select_mask[3]);
        port_num = find_first_bit((unsigned long *)&port_mask,
-                                 sizeof(port_mask));
+                                 sizeof(port_mask) * 8);
 
        if (port_num != port) {
                pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
index 8c25e1b..3a1ef30 100644 (file)
@@ -771,6 +771,9 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
                        read_extra_bytes(pbuf, from, to_fill);
                        from += to_fill;
                        nbytes -= to_fill;
+                       /* may not be enough valid bytes left to align */
+                       if (extra > nbytes)
+                               extra = nbytes;
 
                        /* ...now write carry */
                        dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
@@ -798,6 +801,15 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
                        read_low_bytes(pbuf, from, extra);
                        from += extra;
                        nbytes -= extra;
+                       /*
+                        * If no bytes are left, return early - we are done.
+                        * NOTE: This short-circuit is *required* because
+                        * "extra" may have been reduced in size and "from"
+                        * is not aligned, as required when leaving this
+                        * if block.
+                        */
+                       if (nbytes == 0)
+                               return;
                }
 
                /* at this point, from is QW aligned */
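
The two seg_pio_copy_mid() hunks above clamp the carry top-up ("extra") to the bytes actually remaining and return as soon as the source is exhausted, because everything after that point assumes a quadword-aligned, non-empty source. A heavily simplified, runnable sketch of that shape; struct stream and copy_mid() are illustrative inventions, not the hfi1 PIO code.

    #include <stdio.h>
    #include <string.h>

    /* A partial quadword lives in 'carry'; each call first tops the carry up
     * from the source, clamped to what is available, and stops early when the
     * source runs dry so the aligned-word path never sees a short tail. */
    struct stream {
        unsigned char carry[8];
        unsigned int carry_bytes;
        unsigned long words_written;
    };

    static void copy_mid(struct stream *s, const unsigned char *from, size_t nbytes)
    {
        if (s->carry_bytes) {
            size_t fill = 8 - s->carry_bytes;

            if (fill > nbytes)          /* may not be enough bytes to align */
                fill = nbytes;
            memcpy(s->carry + s->carry_bytes, from, fill);
            s->carry_bytes += fill;
            from += fill;
            nbytes -= fill;

            if (s->carry_bytes == 8) {  /* flush a full quadword */
                s->words_written++;
                s->carry_bytes = 0;
            }
            if (nbytes == 0)            /* nothing left: stop here */
                return;
        }
        s->words_written += nbytes / 8;
        s->carry_bytes = nbytes % 8;
        memcpy(s->carry, from + (nbytes - s->carry_bytes), s->carry_bytes);
    }

    int main(void)
    {
        struct stream s = { .carry_bytes = 5 };
        unsigned char buf[16] = { 0 };

        copy_mid(&s, buf, 2);   /* shorter than the 3 bytes needed to align */
        printf("carry=%u words=%lu\n", s.carry_bytes, s.words_written);
        return 0;
    }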
index 0ecf279..1694037 100644 (file)
@@ -114,6 +114,8 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
 #define KDETH_HCRC_LOWER_SHIFT    24
 #define KDETH_HCRC_LOWER_MASK     0xff
 
+#define AHG_KDETH_INTR_SHIFT 12
+
 #define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
 #define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
 
@@ -1480,7 +1482,8 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
                /* Clear KDETH.SH on last packet */
                if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
                        val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
-                                                               INTR) >> 16);
+                                                    INTR) <<
+                                          AHG_KDETH_INTR_SHIFT);
                        val &= cpu_to_le16(~(1U << 13));
                        AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
                } else {
index 3ee0cad..0c92a40 100644 (file)
@@ -265,6 +265,7 @@ void i40iw_next_iw_state(struct i40iw_qp *iwqp,
                info.dont_send_fin = false;
        if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
                info.reset_tcp_conn = true;
+       iwqp->hw_iwarp_state = state;
        i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
 }
 
index 0cbbe40..445e230 100644 (file)
@@ -100,7 +100,7 @@ static struct notifier_block i40iw_net_notifier = {
        .notifier_call = i40iw_net_event
 };
 
-static int i40iw_notifiers_registered;
+static atomic_t i40iw_notifiers_registered;
 
 /**
  * i40iw_find_i40e_handler - find a handler given a client info
@@ -1342,12 +1342,11 @@ exit:
  */
 static void i40iw_register_notifiers(void)
 {
-       if (!i40iw_notifiers_registered) {
+       if (atomic_inc_return(&i40iw_notifiers_registered) == 1) {
                register_inetaddr_notifier(&i40iw_inetaddr_notifier);
                register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
                register_netevent_notifier(&i40iw_net_notifier);
        }
-       i40iw_notifiers_registered++;
 }
 
 /**
@@ -1429,8 +1428,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
                        i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
                /* fallthrough */
        case INET_NOTIFIER:
-               if (i40iw_notifiers_registered > 0) {
-                       i40iw_notifiers_registered--;
+               if (!atomic_dec_return(&i40iw_notifiers_registered)) {
                        unregister_netevent_notifier(&i40iw_net_notifier);
                        unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
                        unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
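
The i40iw hunks above turn a plain int registration count into an atomic: the caller that takes the count from 0 to 1 registers the notifier chain, and the caller that drops it back to 0 unregisters it. The same counting scheme in C11 atomics, with register_all()/unregister_all() as hypothetical stand-ins for the three notifier calls:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int registered;

    static void register_all(void)   { puts("register notifiers"); }
    static void unregister_all(void) { puts("unregister notifiers"); }

    static void get_notifiers(void)
    {
        /* fetch_add returns the old value: 0 means we are the first user */
        if (atomic_fetch_add(&registered, 1) == 0)
            register_all();
    }

    static void put_notifiers(void)
    {
        /* fetch_sub returns the old value: 1 means we were the last user */
        if (atomic_fetch_sub(&registered, 1) == 1)
            unregister_all();
    }

    int main(void)
    {
        get_notifiers();    /* registers */
        get_notifiers();    /* no-op */
        put_notifiers();    /* no-op */
        put_notifiers();    /* unregisters */
        return 0;
    }

atomic_inc_return() == 1 in the patch corresponds to fetch_add() returning 0 here; !atomic_dec_return() corresponds to fetch_sub() returning 1.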
index 006db64..5df63da 100644 (file)
@@ -687,12 +687,6 @@ repoll:
        is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                MLX4_CQE_OPCODE_ERROR;
 
-       if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
-                    is_send)) {
-               pr_warn("Completion for NOP opcode detected!\n");
-               return -EAGAIN;
-       }
-
        /* Resize CQ in progress */
        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
                if (cq->resize_buf) {
@@ -718,12 +712,6 @@ repoll:
                 */
                mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
                                       be32_to_cpu(cqe->vlan_my_qpn));
-               if (unlikely(!mqp)) {
-                       pr_warn("CQ %06x with entry for unknown QPN %06x\n",
-                              cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
-                       return -EAGAIN;
-               }
-
                *cur_qp = to_mibqp(mqp);
        }
 
@@ -736,11 +724,6 @@ repoll:
                /* SRQ is also in the radix tree */
                msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
                                       srq_num);
-               if (unlikely(!msrq)) {
-                       pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
-                               cq->mcq.cqn, srq_num);
-                       return -EAGAIN;
-               }
        }
 
        if (is_send) {
@@ -891,7 +874,6 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        struct mlx4_ib_qp *cur_qp = NULL;
        unsigned long flags;
        int npolled;
-       int err = 0;
        struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
 
        spin_lock_irqsave(&cq->lock, flags);
@@ -901,8 +883,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        }
 
        for (npolled = 0; npolled < num_entries; ++npolled) {
-               err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
-               if (err)
+               if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
                        break;
        }
 
@@ -911,10 +892,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 out:
        spin_unlock_irqrestore(&cq->lock, flags);
 
-       if (err == 0 || err == -EAGAIN)
-               return npolled;
-       else
-               return err;
+       return npolled;
 }
 
 int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
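
With the NOP and unknown-QPN/SRQN warnings removed, a failing mlx4_ib_poll_one() no longer propagates an error code out of mlx4_ib_poll_cq(): the loop just stops and the function reports how many work completions it managed to fill (the mlx5 hunks below adopt the same contract). A toy version of that loop; poll_one() is a stand-in, not the driver routine.

    #include <stdio.h>

    /* Returns 0 when a completion was consumed, non-zero when nothing usable
     * is left; the caller never sees that value, only the count polled. */
    static int poll_one(int i)
    {
        return i >= 3;              /* pretend only 3 completions are queued */
    }

    static int poll_cq(int num_entries)
    {
        int npolled;

        for (npolled = 0; npolled < num_entries; npolled++)
            if (poll_one(npolled))
                break;

        return npolled;             /* never a negative error code */
    }

    int main(void)
    {
        printf("polled %d of 8 requested\n", poll_cq(8));
        return 0;
    }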
index 35a9f71..5de9a65 100644 (file)
@@ -553,12 +553,6 @@ repoll:
                 * from the table.
                 */
                mqp = __mlx5_qp_lookup(dev->mdev, qpn);
-               if (unlikely(!mqp)) {
-                       mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
-                                    cq->mcq.cqn, qpn);
-                       return -EINVAL;
-               }
-
                *cur_qp = to_mibqp(mqp);
        }
 
@@ -619,13 +613,6 @@ repoll:
                read_lock(&dev->mdev->priv.mkey_table.lock);
                mmkey = __mlx5_mr_lookup(dev->mdev,
                                         mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
-               if (unlikely(!mmkey)) {
-                       read_unlock(&dev->mdev->priv.mkey_table.lock);
-                       mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
-                                    cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
-                       return -EINVAL;
-               }
-
                mr = to_mibmr(mmkey);
                get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
                mr->sig->sig_err_exists = true;
@@ -676,7 +663,6 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        unsigned long flags;
        int soft_polled = 0;
        int npolled;
-       int err = 0;
 
        spin_lock_irqsave(&cq->lock, flags);
        if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -688,8 +674,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
                soft_polled = poll_soft_wc(cq, num_entries, wc);
 
        for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
-               err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled);
-               if (err)
+               if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
                        break;
        }
 
@@ -698,10 +683,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 out:
        spin_unlock_irqrestore(&cq->lock, flags);
 
-       if (err == 0 || err == -EAGAIN)
-               return soft_polled + npolled;
-       else
-               return err;
+       return soft_polled + npolled;
 }
 
 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
index 8db6fdf..e4aecbf 100644 (file)
@@ -1844,6 +1844,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
                                           int domain)
 {
        struct mlx5_ib_dev *dev = to_mdev(qp->device);
+       struct mlx5_ib_qp *mqp = to_mqp(qp);
        struct mlx5_ib_flow_handler *handler = NULL;
        struct mlx5_flow_destination *dst = NULL;
        struct mlx5_ib_flow_prio *ft_prio;
@@ -1870,7 +1871,10 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
        }
 
        dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-       dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn;
+       if (mqp->flags & MLX5_IB_QP_RSS)
+               dst->tir_num = mqp->rss_qp.tirn;
+       else
+               dst->tir_num = mqp->raw_packet_qp.rq.tirn;
 
        if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
                if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)  {
index 40df2cc..996b54e 100644 (file)
@@ -71,7 +71,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 
        addr = addr >> page_shift;
        tmp = (unsigned long)addr;
-       m = find_first_bit(&tmp, sizeof(tmp));
+       m = find_first_bit(&tmp, BITS_PER_LONG);
        skip = 1 << m;
        mask = skip - 1;
        i = 0;
@@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
                for (k = 0; k < len; k++) {
                        if (!(i & mask)) {
                                tmp = (unsigned long)pfn;
-                               m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp)));
+                               m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
                                skip = 1 << m;
                                mask = skip - 1;
                                base = pfn;
@@ -89,7 +89,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
                        } else {
                                if (base + p != pfn) {
                                        tmp = (unsigned long)p;
-                                       m = find_first_bit(&tmp, sizeof(tmp));
+                                       m = find_first_bit(&tmp, BITS_PER_LONG);
                                        skip = 1 << m;
                                        mask = skip - 1;
                                        base = pfn;
index a59034a..67cc741 100644 (file)
@@ -402,6 +402,7 @@ enum mlx5_ib_qp_flags {
        /* QP uses 1 as its source QP number */
        MLX5_IB_QP_SQPN_QP1                     = 1 << 6,
        MLX5_IB_QP_CAP_SCATTER_FCS              = 1 << 7,
+       MLX5_IB_QP_RSS                          = 1 << 8,
 };
 
 struct mlx5_umr_wr {
index f3c943f..9529b46 100644 (file)
@@ -1457,6 +1457,7 @@ create_tir:
        kvfree(in);
        /* qpn is reserved for that QP */
        qp->trans_qp.base.mqp.qpn = 0;
+       qp->flags |= MLX5_IB_QP_RSS;
        return 0;
 
 err:
@@ -3656,12 +3657,8 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
                     struct ib_send_wr *wr, unsigned *idx,
                     int *size, int nreq)
 {
-       int err = 0;
-
-       if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
-               err = -ENOMEM;
-               return err;
-       }
+       if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
+               return -ENOMEM;
 
        *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
        *seg = mlx5_get_send_wqe(qp, *idx);
@@ -3677,7 +3674,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
        *seg += sizeof(**ctrl);
        *size = sizeof(**ctrl) / 16;
 
-       return err;
+       return 0;
 }
 
 static void finish_wqe(struct mlx5_ib_qp *qp,
@@ -3756,7 +3753,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                num_sge = wr->num_sge;
                if (unlikely(num_sge > qp->sq.max_gs)) {
                        mlx5_ib_warn(dev, "\n");
-                       err = -ENOMEM;
+                       err = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }
index 4f7d9b4..9dbfcc0 100644 (file)
@@ -478,6 +478,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn);
 void ipoib_reap_ah(struct work_struct *work);
 
+struct ipoib_path *__path_find(struct net_device *dev, void *gid);
 void ipoib_mark_paths_invalid(struct net_device *dev);
 void ipoib_flush_paths(struct net_device *dev);
 int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv);
index 951d9ab..4ad297d 100644 (file)
@@ -1318,6 +1318,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
        }
 }
 
+#define QPN_AND_OPTIONS_OFFSET 4
+
 static void ipoib_cm_tx_start(struct work_struct *work)
 {
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
@@ -1326,6 +1328,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
        struct ipoib_neigh *neigh;
        struct ipoib_cm_tx *p;
        unsigned long flags;
+       struct ipoib_path *path;
        int ret;
 
        struct ib_sa_path_rec pathrec;
@@ -1338,7 +1341,19 @@ static void ipoib_cm_tx_start(struct work_struct *work)
                p = list_entry(priv->cm.start_list.next, typeof(*p), list);
                list_del_init(&p->list);
                neigh = p->neigh;
+
                qpn = IPOIB_QPN(neigh->daddr);
+               /*
+                * As long as the search is with these 2 locks,
+                * path existence indicates its validity.
+                */
+               path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+               if (!path) {
+                       pr_info("%s ignore not valid path %pI6\n",
+                               __func__,
+                               neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+                       goto free_neigh;
+               }
                memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
 
                spin_unlock_irqrestore(&priv->lock, flags);
@@ -1350,6 +1365,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
                spin_lock_irqsave(&priv->lock, flags);
 
                if (ret) {
+free_neigh:
                        neigh = p->neigh;
                        if (neigh) {
                                neigh->cm = NULL;
index 74bcaa0..cc1c1b0 100644 (file)
@@ -485,7 +485,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
        return -EINVAL;
 }
 
-static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
+struct ipoib_path *__path_find(struct net_device *dev, void *gid)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->path_tree.rb_node;
index 7914c14..cae9bbc 100644 (file)
@@ -403,6 +403,7 @@ isert_init_conn(struct isert_conn *isert_conn)
        INIT_LIST_HEAD(&isert_conn->node);
        init_completion(&isert_conn->login_comp);
        init_completion(&isert_conn->login_req_comp);
+       init_waitqueue_head(&isert_conn->rem_wait);
        kref_init(&isert_conn->kref);
        mutex_init(&isert_conn->mutex);
        INIT_WORK(&isert_conn->release_work, isert_release_work);
@@ -578,7 +579,8 @@ isert_connect_release(struct isert_conn *isert_conn)
        BUG_ON(!device);
 
        isert_free_rx_descriptors(isert_conn);
-       if (isert_conn->cm_id)
+       if (isert_conn->cm_id &&
+           !isert_conn->dev_removed)
                rdma_destroy_id(isert_conn->cm_id);
 
        if (isert_conn->qp) {
@@ -593,7 +595,10 @@ isert_connect_release(struct isert_conn *isert_conn)
 
        isert_device_put(device);
 
-       kfree(isert_conn);
+       if (isert_conn->dev_removed)
+               wake_up_interruptible(&isert_conn->rem_wait);
+       else
+               kfree(isert_conn);
 }
 
 static void
@@ -753,6 +758,7 @@ static int
 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
        struct isert_np *isert_np = cma_id->context;
+       struct isert_conn *isert_conn;
        int ret = 0;
 
        isert_info("%s (%d): status %d id %p np %p\n",
@@ -773,10 +779,21 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
                break;
        case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
        case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
-       case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
                ret = isert_disconnected_handler(cma_id, event->event);
                break;
+       case RDMA_CM_EVENT_DEVICE_REMOVAL:
+               isert_conn = cma_id->qp->qp_context;
+               isert_conn->dev_removed = true;
+               isert_disconnected_handler(cma_id, event->event);
+               wait_event_interruptible(isert_conn->rem_wait,
+                                        isert_conn->state == ISER_CONN_DOWN);
+               kfree(isert_conn);
+               /*
+                * return non-zero from the callback to destroy
+                * the rdma cm id
+                */
+               return 1;
        case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
        case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
        case RDMA_CM_EVENT_CONNECT_ERROR:
index fc791ef..c02ada5 100644 (file)
@@ -158,6 +158,8 @@ struct isert_conn {
        struct work_struct      release_work;
        bool                    logout_posted;
        bool                    snd_w_inv;
+       wait_queue_head_t       rem_wait;
+       bool                    dev_removed;
 };
 
 #define ISERT_MAX_CQ 64
index 97c3729..7817d40 100644 (file)
@@ -127,6 +127,7 @@ config XGENE_SLIMPRO_MBOX
 config BCM_PDC_MBOX
        tristate "Broadcom PDC Mailbox"
        depends on ARM64 || COMPILE_TEST
+       depends on HAS_DMA
        default ARCH_BCM_IPROC
        help
          Mailbox implementation for the Broadcom PDC ring manager,
index cbe0c1e..c19dd82 100644 (file)
@@ -469,7 +469,7 @@ static const struct file_operations pdc_debugfs_stats = {
  * this directory for a SPU.
  * @pdcs: PDC state structure
  */
-void pdc_setup_debugfs(struct pdc_state *pdcs)
+static void pdc_setup_debugfs(struct pdc_state *pdcs)
 {
        char spu_stats_name[16];
 
@@ -485,7 +485,7 @@ void pdc_setup_debugfs(struct pdc_state *pdcs)
                                                  &pdc_debugfs_stats);
 }
 
-void pdc_free_debugfs(void)
+static void pdc_free_debugfs(void)
 {
        if (debugfs_dir && simple_empty(debugfs_dir)) {
                debugfs_remove_recursive(debugfs_dir);
@@ -1191,10 +1191,11 @@ static void pdc_shutdown(struct mbox_chan *chan)
 {
        struct pdc_state *pdcs = chan->con_priv;
 
-       if (pdcs)
-               dev_dbg(&pdcs->pdev->dev,
-                       "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
+       if (!pdcs)
+               return;
 
+       dev_dbg(&pdcs->pdev->dev,
+               "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
        pdc_ring_free(pdcs);
 }
 
index 6fff794..13041ee 100644 (file)
@@ -2183,19 +2183,29 @@ location_show(struct mddev *mddev, char *page)
 static ssize_t
 location_store(struct mddev *mddev, const char *buf, size_t len)
 {
+       int rv;
 
+       rv = mddev_lock(mddev);
+       if (rv)
+               return rv;
        if (mddev->pers) {
-               if (!mddev->pers->quiesce)
-                       return -EBUSY;
-               if (mddev->recovery || mddev->sync_thread)
-                       return -EBUSY;
+               if (!mddev->pers->quiesce) {
+                       rv = -EBUSY;
+                       goto out;
+               }
+               if (mddev->recovery || mddev->sync_thread) {
+                       rv = -EBUSY;
+                       goto out;
+               }
        }
 
        if (mddev->bitmap || mddev->bitmap_info.file ||
            mddev->bitmap_info.offset) {
                /* bitmap already configured.  Only option is to clear it */
-               if (strncmp(buf, "none", 4) != 0)
-                       return -EBUSY;
+               if (strncmp(buf, "none", 4) != 0) {
+                       rv = -EBUSY;
+                       goto out;
+               }
                if (mddev->pers) {
                        mddev->pers->quiesce(mddev, 1);
                        bitmap_destroy(mddev);
@@ -2214,21 +2224,25 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
                        /* nothing to be done */;
                else if (strncmp(buf, "file:", 5) == 0) {
                        /* Not supported yet */
-                       return -EINVAL;
+                       rv = -EINVAL;
+                       goto out;
                } else {
-                       int rv;
                        if (buf[0] == '+')
                                rv = kstrtoll(buf+1, 10, &offset);
                        else
                                rv = kstrtoll(buf, 10, &offset);
                        if (rv)
-                               return rv;
-                       if (offset == 0)
-                               return -EINVAL;
+                               goto out;
+                       if (offset == 0) {
+                               rv = -EINVAL;
+                               goto out;
+                       }
                        if (mddev->bitmap_info.external == 0 &&
                            mddev->major_version == 0 &&
-                           offset != mddev->bitmap_info.default_offset)
-                               return -EINVAL;
+                           offset != mddev->bitmap_info.default_offset) {
+                               rv = -EINVAL;
+                               goto out;
+                       }
                        mddev->bitmap_info.offset = offset;
                        if (mddev->pers) {
                                struct bitmap *bitmap;
@@ -2245,7 +2259,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
                                mddev->pers->quiesce(mddev, 0);
                                if (rv) {
                                        bitmap_destroy(mddev);
-                                       return rv;
+                                       goto out;
                                }
                        }
                }
@@ -2257,6 +2271,11 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
                set_bit(MD_CHANGE_DEVS, &mddev->flags);
                md_wakeup_thread(mddev->thread);
        }
+       rv = 0;
+out:
+       mddev_unlock(mddev);
+       if (rv)
+               return rv;
        return len;
 }
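
location_store() above now takes mddev_lock() on entry, so every early return had to become a goto to the single unlock point at "out". A small userspace sketch of the same shape, assuming a pthread mutex and made-up busy/len values rather than the md objects:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int busy = 1;

    static long store(const char *buf, long len)
    {
        long rv;

        (void)buf;                  /* parsing elided in this sketch */
        rv = pthread_mutex_lock(&lock);
        if (rv)
            return -rv;

        if (busy) {
            rv = -EBUSY;            /* was an early 'return -EBUSY' */
            goto out;
        }
        /* ... apply the new setting here ... */
        rv = 0;
    out:
        pthread_mutex_unlock(&lock);    /* exactly one unlock on every path */
        return rv ? rv : len;
    }

    int main(void)
    {
        printf("%ld\n", store("none", 4));  /* -EBUSY while busy */
        busy = 0;
        printf("%ld\n", store("none", 4));  /* 4 on success */
        return 0;
    }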
 
index 6571c81..8625040 100644 (file)
@@ -1879,7 +1879,7 @@ static int __init dm_bufio_init(void)
        __cache_size_refresh();
        mutex_unlock(&dm_bufio_clients_lock);
 
-       dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
+       dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
        if (!dm_bufio_wq)
                return -ENOMEM;
 
index eedba67..8742957 100644 (file)
@@ -1453,7 +1453,7 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
        unsigned i;
        int err;
 
-       cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
+       cc->tfms = kzalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
                           GFP_KERNEL);
        if (!cc->tfms)
                return -ENOMEM;
@@ -1924,6 +1924,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
                return DM_MAPIO_REMAPPED;
        }
 
+       /*
+        * Check if bio is too large, split as needed.
+        */
+       if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
+           bio_data_dir(bio) == WRITE)
+               dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
+
        io = dm_per_bio_data(bio, cc->per_bio_data_size);
        crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
        io->ctx.req = (struct skcipher_request *)(io + 1);
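
The dm_accept_partial_bio() call above caps how much of an oversized write is handled in one pass and lets the block layer resubmit the rest, so the effect is plain chunking against a fixed ceiling (BIO_MAX_PAGES << PAGE_SHIFT bytes, converted to sectors). Sketched below with made-up lengths:

    #include <stdio.h>

    /* Process 'len' bytes in pieces no larger than 'cap', the way an
     * oversized bio is accepted only partially and the remainder resubmitted. */
    static void submit_in_chunks(unsigned long len, unsigned long cap)
    {
        unsigned long off = 0;

        while (off < len) {
            unsigned long chunk = len - off;

            if (chunk > cap)
                chunk = cap;
            printf("chunk at %lu: %lu bytes\n", off, chunk);
            off += chunk;
        }
    }

    int main(void)
    {
        submit_in_chunks(3 * 1024 * 1024 + 17, 1024 * 1024);    /* 1 MiB cap */
        return 0;
    }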
index 4ab6803..49e4d8d 100644 (file)
@@ -259,12 +259,12 @@ static int log_one_block(struct log_writes_c *lc,
                goto out;
        sector++;
 
-       bio = bio_alloc(GFP_KERNEL, block->vec_cnt);
+       atomic_inc(&lc->io_blocks);
+       bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
        if (!bio) {
                DMERR("Couldn't alloc log bio");
                goto error;
        }
-       atomic_inc(&lc->io_blocks);
        bio->bi_iter.bi_size = 0;
        bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = lc->logdev->bdev;
@@ -282,7 +282,7 @@ static int log_one_block(struct log_writes_c *lc,
                if (ret != block->vecs[i].bv_len) {
                        atomic_inc(&lc->io_blocks);
                        submit_bio(bio);
-                       bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i);
+                       bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
                        if (!bio) {
                                DMERR("Couldn't alloc log bio");
                                goto error;
@@ -459,9 +459,9 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad;
        }
 
-       ret = -EINVAL;
        lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
-       if (!lc->log_kthread) {
+       if (IS_ERR(lc->log_kthread)) {
+               ret = PTR_ERR(lc->log_kthread);
                ti->error = "Couldn't alloc kthread";
                dm_put_device(ti, lc->dev);
                dm_put_device(ti, lc->logdev);
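
The dm-log-writes fix above matters because kthread_run() reports failure as an ERR_PTR-encoded errno, never as NULL, so the old "!lc->log_kthread" test could not fire. A userspace re-creation of that convention, mirroring the kernel macros in spirit only; fake_kthread_run() is obviously not the real API.

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Encode a small negative errno in the pointer value itself. */
    static void *ERR_PTR(long error)     { return (void *)error; }
    static long  PTR_ERR(const void *p)  { return (long)p; }
    static int   IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *fake_kthread_run(int fail)
    {
        static int dummy_task;

        return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy_task;
    }

    int main(void)
    {
        void *task = fake_kthread_run(1);

        printf("NULL check fires: %s\n", task == NULL ? "yes" : "no");  /* no  */
        printf("IS_ERR fires:     %s\n", IS_ERR(task) ? "yes" : "no");  /* yes */
        if (IS_ERR(task))
            printf("error code:       %ld\n", PTR_ERR(task));           /* -ENOMEM */
        return 0;
    }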
index 41573f1..34a840d 100644 (file)
@@ -834,8 +834,10 @@ static int join(struct mddev *mddev, int nodes)
                goto err;
        }
        cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
-       if (!cinfo->ack_lockres)
+       if (!cinfo->ack_lockres) {
+               ret = -ENOMEM;
                goto err;
+       }
        /* get sync CR lock on ACK. */
        if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR))
                pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n",
@@ -849,8 +851,10 @@ static int join(struct mddev *mddev, int nodes)
        pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number);
        snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1);
        cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
-       if (!cinfo->bitmap_lockres)
+       if (!cinfo->bitmap_lockres) {
+               ret = -ENOMEM;
                goto err;
+       }
        if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) {
                pr_err("Failed to get bitmap lock\n");
                ret = -EINVAL;
@@ -858,8 +862,10 @@ static int join(struct mddev *mddev, int nodes)
        }
 
        cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0);
-       if (!cinfo->resync_lockres)
+       if (!cinfo->resync_lockres) {
+               ret = -ENOMEM;
                goto err;
+       }
 
        return 0;
 err:
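
Each md-cluster hunk above sets ret before jumping to err; without that, a NULL return from lockres_init() falls through the error label still carrying 0 from an earlier, successful step, and join() reports success after a failed allocation. A compact illustration, with alloc_lockres() as a hypothetical stand-in:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *alloc_lockres(int fail)
    {
        return fail ? NULL : malloc(16);
    }

    static int join(void)
    {
        void *ack = NULL, *bitmap = NULL;
        int ret = 0;

        ack = alloc_lockres(0);
        if (!ack) {
            ret = -ENOMEM;
            goto err;
        }
        bitmap = alloc_lockres(1);      /* simulate the failing step */
        if (!bitmap) {
            ret = -ENOMEM;              /* without this line, ret stays 0 */
            goto err;
        }
        return 0;
    err:
        free(ack);
        free(bitmap);
        return ret;
    }

    int main(void)
    {
        printf("join() = %d\n", join());    /* -ENOMEM, not 0 */
        return 0;
    }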
index d646f6e..67642ba 100644 (file)
@@ -1604,11 +1604,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
                        mddev->new_chunk_sectors = mddev->chunk_sectors;
                }
 
-               if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) {
+               if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
                        set_bit(MD_HAS_JOURNAL, &mddev->flags);
-                       if (mddev->recovery_cp == MaxSector)
-                               set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
-               }
        } else if (mddev->pers == NULL) {
                /* Insist of good event counter while assembling, except for
                 * spares (which don't need an event count) */
@@ -5851,6 +5848,9 @@ static int get_array_info(struct mddev *mddev, void __user *arg)
                        working++;
                        if (test_bit(In_sync, &rdev->flags))
                                insync++;
+                       else if (test_bit(Journal, &rdev->flags))
+                               /* TODO: add journal count to md_u.h */
+                               ;
                        else
                                spare++;
                }
@@ -7862,6 +7862,7 @@ void md_do_sync(struct md_thread *thread)
         */
 
        do {
+               int mddev2_minor = -1;
                mddev->curr_resync = 2;
 
        try_again:
@@ -7891,10 +7892,14 @@ void md_do_sync(struct md_thread *thread)
                                prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
                                if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
                                    mddev2->curr_resync >= mddev->curr_resync) {
-                                       printk(KERN_INFO "md: delaying %s of %s"
-                                              " until %s has finished (they"
-                                              " share one or more physical units)\n",
-                                              desc, mdname(mddev), mdname(mddev2));
+                                       if (mddev2_minor != mddev2->md_minor) {
+                                               mddev2_minor = mddev2->md_minor;
+                                               printk(KERN_INFO "md: delaying %s of %s"
+                                                      " until %s has finished (they"
+                                                      " share one or more physical units)\n",
+                                                      desc, mdname(mddev),
+                                                      mdname(mddev2));
+                                       }
                                        mddev_put(mddev2);
                                        if (signal_pending(current))
                                                flush_signals(current);
@@ -8275,16 +8280,13 @@ no_add:
 static void md_start_sync(struct work_struct *ws)
 {
        struct mddev *mddev = container_of(ws, struct mddev, del_work);
-       int ret = 0;
 
        mddev->sync_thread = md_register_thread(md_do_sync,
                                                mddev,
                                                "resync");
        if (!mddev->sync_thread) {
-               if (!(mddev_is_clustered(mddev) && ret == -EAGAIN))
-                       printk(KERN_ERR "%s: could not start resync"
-                              " thread...\n",
-                              mdname(mddev));
+               printk(KERN_ERR "%s: could not start resync thread...\n",
+                      mdname(mddev));
                /* leave the spares where they are, it shouldn't hurt */
                clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
index 0e4efcd..be1a9fc 100644 (file)
@@ -1064,6 +1064,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
        int max_sectors;
        int sectors;
 
+       md_write_start(mddev, bio);
+
        /*
         * Register the new request and wait if the reconstruction
         * thread has put up a bar for new requests.
@@ -1445,8 +1447,6 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
                return;
        }
 
-       md_write_start(mddev, bio);
-
        do {
 
                /*
@@ -2465,20 +2465,21 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
 
        while (sect_to_write) {
                struct bio *wbio;
+               sector_t wsector;
                if (sectors > sect_to_write)
                        sectors = sect_to_write;
                /* Write at 'sector' for 'sectors' */
                wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
                bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
-               wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
-                                  choose_data_offset(r10_bio, rdev) +
-                                  (sector - r10_bio->sector));
+               wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
+               wbio->bi_iter.bi_sector = wsector +
+                                  choose_data_offset(r10_bio, rdev);
                wbio->bi_bdev = rdev->bdev;
                bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
 
                if (submit_bio_wait(wbio) < 0)
                        /* Failure! */
-                       ok = rdev_set_badblocks(rdev, sector,
+                       ok = rdev_set_badblocks(rdev, wsector,
                                                sectors, 0)
                                && ok;
 
index 8912407..da583bb 100644 (file)
@@ -659,6 +659,7 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
 {
        struct stripe_head *sh;
        int hash = stripe_hash_locks_hash(sector);
+       int inc_empty_inactive_list_flag;
 
        pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
 
@@ -703,7 +704,12 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
                                        atomic_inc(&conf->active_stripes);
                                BUG_ON(list_empty(&sh->lru) &&
                                       !test_bit(STRIPE_EXPANDING, &sh->state));
+                               inc_empty_inactive_list_flag = 0;
+                               if (!list_empty(conf->inactive_list + hash))
+                                       inc_empty_inactive_list_flag = 1;
                                list_del_init(&sh->lru);
+                               if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
+                                       atomic_inc(&conf->empty_inactive_list_nr);
                                if (sh->group) {
                                        sh->group->stripes_cnt--;
                                        sh->group = NULL;
@@ -762,6 +768,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
        sector_t head_sector, tmp_sec;
        int hash;
        int dd_idx;
+       int inc_empty_inactive_list_flag;
 
        /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
        tmp_sec = sh->sector;
@@ -779,7 +786,12 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
                                atomic_inc(&conf->active_stripes);
                        BUG_ON(list_empty(&head->lru) &&
                               !test_bit(STRIPE_EXPANDING, &head->state));
+                       inc_empty_inactive_list_flag = 0;
+                       if (!list_empty(conf->inactive_list + hash))
+                               inc_empty_inactive_list_flag = 1;
                        list_del_init(&head->lru);
+                       if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
+                               atomic_inc(&conf->empty_inactive_list_nr);
                        if (head->group) {
                                head->group->stripes_cnt--;
                                head->group = NULL;
@@ -993,7 +1005,6 @@ again:
 
                        set_bit(STRIPE_IO_STARTED, &sh->state);
 
-                       bio_reset(bi);
                        bi->bi_bdev = rdev->bdev;
                        bio_set_op_attrs(bi, op, op_flags);
                        bi->bi_end_io = op_is_write(op)
@@ -1045,7 +1056,6 @@ again:
 
                        set_bit(STRIPE_IO_STARTED, &sh->state);
 
-                       bio_reset(rbi);
                        rbi->bi_bdev = rrdev->bdev;
                        bio_set_op_attrs(rbi, op, op_flags);
                        BUG_ON(!op_is_write(op));
@@ -1978,9 +1988,11 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
        put_cpu();
 }
 
-static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
+static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
+       int disks)
 {
        struct stripe_head *sh;
+       int i;
 
        sh = kmem_cache_zalloc(sc, gfp);
        if (sh) {
@@ -1989,6 +2001,17 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
                INIT_LIST_HEAD(&sh->batch_list);
                INIT_LIST_HEAD(&sh->lru);
                atomic_set(&sh->count, 1);
+               for (i = 0; i < disks; i++) {
+                       struct r5dev *dev = &sh->dev[i];
+
+                       bio_init(&dev->req);
+                       dev->req.bi_io_vec = &dev->vec;
+                       dev->req.bi_max_vecs = 1;
+
+                       bio_init(&dev->rreq);
+                       dev->rreq.bi_io_vec = &dev->rvec;
+                       dev->rreq.bi_max_vecs = 1;
+               }
        }
        return sh;
 }
@@ -1996,7 +2019,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
 {
        struct stripe_head *sh;
 
-       sh = alloc_stripe(conf->slab_cache, gfp);
+       sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size);
        if (!sh)
                return 0;
 
@@ -2167,7 +2190,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
        mutex_lock(&conf->cache_size_mutex);
 
        for (i = conf->max_nr_stripes; i; i--) {
-               nsh = alloc_stripe(sc, GFP_KERNEL);
+               nsh = alloc_stripe(sc, GFP_KERNEL, newsize);
                if (!nsh)
                        break;
 
@@ -2299,6 +2322,7 @@ static void raid5_end_read_request(struct bio * bi)
                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
                bi->bi_error);
        if (i == disks) {
+               bio_reset(bi);
                BUG();
                return;
        }
@@ -2402,6 +2426,7 @@ static void raid5_end_read_request(struct bio * bi)
        clear_bit(R5_LOCKED, &sh->dev[i].flags);
        set_bit(STRIPE_HANDLE, &sh->state);
        raid5_release_stripe(sh);
+       bio_reset(bi);
 }
 
 static void raid5_end_write_request(struct bio *bi)
@@ -2436,6 +2461,7 @@ static void raid5_end_write_request(struct bio *bi)
                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
                bi->bi_error);
        if (i == disks) {
+               bio_reset(bi);
                BUG();
                return;
        }
@@ -2479,22 +2505,13 @@ static void raid5_end_write_request(struct bio *bi)
 
        if (sh->batch_head && sh != sh->batch_head)
                raid5_release_stripe(sh->batch_head);
+       bio_reset(bi);
 }
 
 static void raid5_build_block(struct stripe_head *sh, int i, int previous)
 {
        struct r5dev *dev = &sh->dev[i];
 
-       bio_init(&dev->req);
-       dev->req.bi_io_vec = &dev->vec;
-       dev->req.bi_max_vecs = 1;
-       dev->req.bi_private = sh;
-
-       bio_init(&dev->rreq);
-       dev->rreq.bi_io_vec = &dev->rvec;
-       dev->rreq.bi_max_vecs = 1;
-       dev->rreq.bi_private = sh;
-
        dev->flags = 0;
        dev->sector = raid5_compute_blocknr(sh, i, previous);
 }
@@ -4628,7 +4645,9 @@ finish:
        }
 
        if (!bio_list_empty(&s.return_bi)) {
-               if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) {
+               if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags) &&
+                               (s.failed <= conf->max_degraded ||
+                                       conf->mddev->external == 0)) {
                        spin_lock_irq(&conf->device_lock);
                        bio_list_merge(&conf->return_bi, &s.return_bi);
                        spin_unlock_irq(&conf->device_lock);
@@ -6826,11 +6845,14 @@ static int raid5_run(struct mddev *mddev)
        if (IS_ERR(conf))
                return PTR_ERR(conf);
 
-       if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !journal_dev) {
-               printk(KERN_ERR "md/raid:%s: journal disk is missing, force array readonly\n",
-                      mdname(mddev));
-               mddev->ro = 1;
-               set_disk_ro(mddev->gendisk, 1);
+       if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
+               if (!journal_dev) {
+                       pr_err("md/raid:%s: journal disk is missing, force array readonly\n",
+                              mdname(mddev));
+                       mddev->ro = 1;
+                       set_disk_ro(mddev->gendisk, 1);
+               } else if (mddev->recovery_cp == MaxSector)
+                       set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
        }
 
        conf->min_offset_diff = min_offset_diff;
index 869c83f..f00f3e7 100644 (file)
@@ -2185,7 +2185,7 @@ static int gpmc_probe_dt(struct platform_device *pdev)
        return 0;
 }
 
-static int gpmc_probe_dt_children(struct platform_device *pdev)
+static void gpmc_probe_dt_children(struct platform_device *pdev)
 {
        int ret;
        struct device_node *child;
@@ -2200,11 +2200,11 @@ static int gpmc_probe_dt_children(struct platform_device *pdev)
                else
                        ret = gpmc_probe_generic_child(pdev, child);
 
-               if (ret)
-                       return ret;
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to probe DT child '%s': %d\n",
+                               child->name, ret);
+               }
        }
-
-       return 0;
 }
 #else
 static int gpmc_probe_dt(struct platform_device *pdev)
@@ -2212,9 +2212,8 @@ static int gpmc_probe_dt(struct platform_device *pdev)
        return 0;
 }
 
-static int gpmc_probe_dt_children(struct platform_device *pdev)
+static void gpmc_probe_dt_children(struct platform_device *pdev)
 {
-       return 0;
 }
 #endif /* CONFIG_OF */
 
@@ -2369,16 +2368,10 @@ static int gpmc_probe(struct platform_device *pdev)
                goto setup_irq_failed;
        }
 
-       rc = gpmc_probe_dt_children(pdev);
-       if (rc < 0) {
-               dev_err(gpmc->dev, "failed to probe DT children\n");
-               goto dt_children_failed;
-       }
+       gpmc_probe_dt_children(pdev);
 
        return 0;
 
-dt_children_failed:
-       gpmc_free_irq(gpmc);
 setup_irq_failed:
        gpmc_gpio_exit(gpmc);
 gpio_init_failed:
index a216b46..d002528 100644 (file)
@@ -345,16 +345,6 @@ config SENSORS_TSL2550
          This driver can also be built as a module.  If so, the module
          will be called tsl2550.
 
-config SENSORS_BH1780
-       tristate "ROHM BH1780GLI ambient light sensor"
-       depends on I2C && SYSFS
-       help
-         If you say yes here you get support for the ROHM BH1780GLI
-         ambient light sensor.
-
-         This driver can also be built as a module.  If so, the module
-         will be called bh1780gli.
-
 config SENSORS_BH1770
          tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
          depends on I2C
index 7410c6d..fb32516 100644 (file)
@@ -19,7 +19,6 @@ obj-$(CONFIG_TIFM_CORE)               += tifm_core.o
 obj-$(CONFIG_TIFM_7XX1)        += tifm_7xx1.o
 obj-$(CONFIG_PHANTOM)          += phantom.o
 obj-$(CONFIG_QCOM_COINCELL)    += qcom-coincell.o
-obj-$(CONFIG_SENSORS_BH1780)   += bh1780gli.o
 obj-$(CONFIG_SENSORS_BH1770)   += bh1770glc.o
 obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
 obj-$(CONFIG_SGI_IOC4)         += ioc4.o
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
deleted file mode 100644 (file)
index 7f90ce5..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * bh1780gli.c
- * ROHM Ambient Light Sensor Driver
- *
- * Copyright (C) 2010 Texas Instruments
- * Author: Hemanth V <hemanthv@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/of.h>
-
-#define BH1780_REG_CONTROL     0x80
-#define BH1780_REG_PARTID      0x8A
-#define BH1780_REG_MANFID      0x8B
-#define BH1780_REG_DLOW        0x8C
-#define BH1780_REG_DHIGH       0x8D
-
-#define BH1780_REVMASK         (0xf)
-#define BH1780_POWMASK         (0x3)
-#define BH1780_POFF            (0x0)
-#define BH1780_PON             (0x3)
-
-/* power on settling time in ms */
-#define BH1780_PON_DELAY       2
-
-struct bh1780_data {
-       struct i2c_client *client;
-       int power_state;
-       /* lock for sysfs operations */
-       struct mutex lock;
-};
-
-static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg)
-{
-       int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
-       if (ret < 0)
-               dev_err(&ddata->client->dev,
-                       "i2c_smbus_write_byte_data failed error %d Register (%s)\n",
-                       ret, msg);
-       return ret;
-}
-
-static int bh1780_read(struct bh1780_data *ddata, u8 reg, char *msg)
-{
-       int ret = i2c_smbus_read_byte_data(ddata->client, reg);
-       if (ret < 0)
-               dev_err(&ddata->client->dev,
-                       "i2c_smbus_read_byte_data failed error %d Register (%s)\n",
-                       ret, msg);
-       return ret;
-}
-
-static ssize_t bh1780_show_lux(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct bh1780_data *ddata = platform_get_drvdata(pdev);
-       int lsb, msb;
-
-       lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW");
-       if (lsb < 0)
-               return lsb;
-
-       msb = bh1780_read(ddata, BH1780_REG_DHIGH, "DHIGH");
-       if (msb < 0)
-               return msb;
-
-       return sprintf(buf, "%d\n", (msb << 8) | lsb);
-}
-
-static ssize_t bh1780_show_power_state(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct bh1780_data *ddata = platform_get_drvdata(pdev);
-       int state;
-
-       state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
-       if (state < 0)
-               return state;
-
-       return sprintf(buf, "%d\n", state & BH1780_POWMASK);
-}
-
-static ssize_t bh1780_store_power_state(struct device *dev,
-                                       struct device_attribute *attr,
-                                       const char *buf, size_t count)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct bh1780_data *ddata = platform_get_drvdata(pdev);
-       unsigned long val;
-       int error;
-
-       error = kstrtoul(buf, 0, &val);
-       if (error)
-               return error;
-
-       if (val < BH1780_POFF || val > BH1780_PON)
-               return -EINVAL;
-
-       mutex_lock(&ddata->lock);
-
-       error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL");
-       if (error < 0) {
-               mutex_unlock(&ddata->lock);
-               return error;
-       }
-
-       msleep(BH1780_PON_DELAY);
-       ddata->power_state = val;
-       mutex_unlock(&ddata->lock);
-
-       return count;
-}
-
-static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL);
-
-static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
-               bh1780_show_power_state, bh1780_store_power_state);
-
-static struct attribute *bh1780_attributes[] = {
-       &dev_attr_power_state.attr,
-       &dev_attr_lux.attr,
-       NULL
-};
-
-static const struct attribute_group bh1780_attr_group = {
-       .attrs = bh1780_attributes,
-};
-
-static int bh1780_probe(struct i2c_client *client,
-                                               const struct i2c_device_id *id)
-{
-       int ret;
-       struct bh1780_data *ddata;
-       struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
-
-       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
-               return -EIO;
-
-       ddata = devm_kzalloc(&client->dev, sizeof(struct bh1780_data),
-                            GFP_KERNEL);
-       if (ddata == NULL)
-               return -ENOMEM;
-
-       ddata->client = client;
-       i2c_set_clientdata(client, ddata);
-
-       ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID");
-       if (ret < 0)
-               return ret;
-
-       dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n",
-                       (ret & BH1780_REVMASK));
-
-       mutex_init(&ddata->lock);
-
-       return sysfs_create_group(&client->dev.kobj, &bh1780_attr_group);
-}
-
-static int bh1780_remove(struct i2c_client *client)
-{
-       sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group);
-
-       return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int bh1780_suspend(struct device *dev)
-{
-       struct bh1780_data *ddata;
-       int state, ret;
-       struct i2c_client *client = to_i2c_client(dev);
-
-       ddata = i2c_get_clientdata(client);
-       state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
-       if (state < 0)
-               return state;
-
-       ddata->power_state = state & BH1780_POWMASK;
-
-       ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF,
-                               "CONTROL");
-
-       if (ret < 0)
-               return ret;
-
-       return 0;
-}
-
-static int bh1780_resume(struct device *dev)
-{
-       struct bh1780_data *ddata;
-       int state, ret;
-       struct i2c_client *client = to_i2c_client(dev);
-
-       ddata = i2c_get_clientdata(client);
-       state = ddata->power_state;
-       ret = bh1780_write(ddata, BH1780_REG_CONTROL, state,
-                               "CONTROL");
-
-       if (ret < 0)
-               return ret;
-
-       return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume);
-
-static const struct i2c_device_id bh1780_id[] = {
-       { "bh1780", 0 },
-       { },
-};
-
-MODULE_DEVICE_TABLE(i2c, bh1780_id);
-
-#ifdef CONFIG_OF
-static const struct of_device_id of_bh1780_match[] = {
-       { .compatible = "rohm,bh1780gli", },
-       {},
-};
-
-MODULE_DEVICE_TABLE(of, of_bh1780_match);
-#endif
-
-static struct i2c_driver bh1780_driver = {
-       .probe          = bh1780_probe,
-       .remove         = bh1780_remove,
-       .id_table       = bh1780_id,
-       .driver = {
-               .name = "bh1780",
-               .pm     = &bh1780_pm,
-               .of_match_table = of_match_ptr(of_bh1780_match),
-       },
-};
-
-module_i2c_driver(bh1780_driver);
-
-MODULE_DESCRIPTION("BH1780GLI Ambient Light Sensor Driver");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Hemanth V <hemanthv@ti.com>");
index 166b1db..3564477 100644 (file)
@@ -4,7 +4,7 @@
  */
 #include "lkdtm.h"
 
-void lkdtm_rodata_do_nothing(void)
+void notrace lkdtm_rodata_do_nothing(void)
 {
        /* Does nothing. We just want an architecture agnostic "return". */
 }
index 5525a20..1dd6114 100644 (file)
@@ -9,7 +9,15 @@
 #include <linux/uaccess.h>
 #include <asm/cacheflush.h>
 
-static size_t cache_size = 1024;
+/*
+ * Many of the tests here end up using const sizes, but those would
+ * normally be ignored by hardened usercopy, so force the compiler
+ * into choosing the non-const path to make sure we trigger the
+ * hardened usercopy checks by adding "unconst" to all the const copies,
+ * and making sure "cache_size" isn't optimized into a const.
+ */
+static volatile size_t unconst = 0;
+static volatile size_t cache_size = 1024;
 static struct kmem_cache *bad_cache;
 
 static const unsigned char test_text[] = "This is a test.\n";
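
A minimal sketch of the effect the comment above describes (demo_copy() and its __user pointer are illustrative, not part of lkdtm): with a literal size the usercopy bounds check can be resolved at compile time, so adding a volatile zero forces the copy through the runtime hardened-usercopy path.

	#include <linux/uaccess.h>

	static volatile size_t unconst;	/* always 0, but opaque to the compiler */

	static int demo_copy(void __user *user_ptr)
	{
		char buf[16] = "hello";

		/* constant length: the check can be folded away at compile time */
		if (copy_to_user(user_ptr, buf, sizeof(buf)))
			return -EFAULT;

		/* non-constant length: always hits the runtime object-size check */
		if (copy_to_user(user_ptr, buf, unconst + sizeof(buf)))
			return -EFAULT;

		return 0;
	}
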
@@ -67,14 +75,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
        if (to_user) {
                pr_info("attempting good copy_to_user of local stack\n");
                if (copy_to_user((void __user *)user_addr, good_stack,
-                                sizeof(good_stack))) {
+                                unconst + sizeof(good_stack))) {
                        pr_warn("copy_to_user failed unexpectedly?!\n");
                        goto free_user;
                }
 
                pr_info("attempting bad copy_to_user of distant stack\n");
                if (copy_to_user((void __user *)user_addr, bad_stack,
-                                sizeof(good_stack))) {
+                                unconst + sizeof(good_stack))) {
                        pr_warn("copy_to_user failed, but lacked Oops\n");
                        goto free_user;
                }
@@ -88,14 +96,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
 
                pr_info("attempting good copy_from_user of local stack\n");
                if (copy_from_user(good_stack, (void __user *)user_addr,
-                                  sizeof(good_stack))) {
+                                  unconst + sizeof(good_stack))) {
                        pr_warn("copy_from_user failed unexpectedly?!\n");
                        goto free_user;
                }
 
                pr_info("attempting bad copy_from_user of distant stack\n");
                if (copy_from_user(bad_stack, (void __user *)user_addr,
-                                  sizeof(good_stack))) {
+                                  unconst + sizeof(good_stack))) {
                        pr_warn("copy_from_user failed, but lacked Oops\n");
                        goto free_user;
                }
@@ -109,7 +117,7 @@ static void do_usercopy_heap_size(bool to_user)
 {
        unsigned long user_addr;
        unsigned char *one, *two;
-       const size_t size = 1024;
+       size_t size = unconst + 1024;
 
        one = kmalloc(size, GFP_KERNEL);
        two = kmalloc(size, GFP_KERNEL);
@@ -285,13 +293,14 @@ void lkdtm_USERCOPY_KERNEL(void)
 
        pr_info("attempting good copy_to_user from kernel rodata\n");
        if (copy_to_user((void __user *)user_addr, test_text,
-                        sizeof(test_text))) {
+                        unconst + sizeof(test_text))) {
                pr_warn("copy_to_user failed unexpectedly?!\n");
                goto free_user;
        }
 
        pr_info("attempting bad copy_to_user from kernel text\n");
-       if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
+       if (copy_to_user((void __user *)user_addr, vm_mmap,
+                        unconst + PAGE_SIZE)) {
                pr_warn("copy_to_user failed, but lacked Oops\n");
                goto free_user;
        }
index e2fb44c..dc3a854 100644 (file)
@@ -1263,8 +1263,14 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
 static bool mei_me_fw_type_sps(struct pci_dev *pdev)
 {
        u32 reg;
-       /* Read ME FW Status check for SPS Firmware */
-       pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+       unsigned int devfn;
+
+       /*
+        * Read the ME FW Status register to check for SPS firmware.
+        * The SPS FW is only signaled in PCI function 0.
+        */
+       devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+       pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
        trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
        /* if bits [19:16] = 15, running SPS Firmware */
        return (reg & 0xf0000) == 0xf0000;
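
For reference, PCI_SLOT() and PCI_DEVFN() just repack the 8-bit devfn (bits 7:3 are the slot/device number, bits 2:0 the function), so reading a register from function 0 of the same device looks roughly like the sketch below (read_hfs1_from_func0() is illustrative; PCI_CFG_HFS_1 is the driver's own config-space offset).

	#include <linux/pci.h>

	static u32 read_hfs1_from_func0(struct pci_dev *pdev)
	{
		/* keep the slot, force the function number to 0 */
		unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
		u32 reg = 0;

		pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
		return reg;
	}
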
index 64e64da..71cea9b 100644 (file)
@@ -85,8 +85,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
index c6a1309..3f31ca3 100644 (file)
@@ -1341,9 +1341,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                            slave_dev->name);
        }
 
-       /* already enslaved */
-       if (slave_dev->flags & IFF_SLAVE) {
-               netdev_dbg(bond_dev, "Error: Device was already enslaved\n");
+       /* already in use? */
+       if (netdev_is_rx_handler_busy(slave_dev)) {
+               netdev_err(bond_dev,
+                          "Error: Device is in use and cannot be enslaved\n");
                return -EBUSY;
        }
 
index de6d044..0659846 100644 (file)
@@ -25,4 +25,13 @@ source "drivers/net/dsa/b53/Kconfig"
 
 source "drivers/net/dsa/mv88e6xxx/Kconfig"
 
+config NET_DSA_QCA8K
+       tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
+       depends on NET_DSA
+       select NET_DSA_TAG_QCA
+       select REGMAP
+       ---help---
+         This enables support for the Qualcomm Atheros QCA8K Ethernet
+         switch chips.
+
 endmenu
index ca1e71b..8346e4f 100644 (file)
@@ -1,5 +1,6 @@
 obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
 obj-$(CONFIG_NET_DSA_BCM_SF2)  += bcm_sf2.o
+obj-$(CONFIG_NET_DSA_QCA8K)    += qca8k.o
 
 obj-y                          += b53/
 obj-y                          += mv88e6xxx/
index 76672da..f192a67 100644 (file)
@@ -372,7 +372,6 @@ static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
 
 #ifdef CONFIG_BCM47XX
 
-#include <linux/version.h>
 #include <linux/bcm47xx_nvram.h>
 #include <bcm47xx_board.h>
 static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
index 5bf4f34..e218887 100644 (file)
@@ -960,7 +960,7 @@ static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
        return 0;
 }
 
-struct b53_io_ops bcm_sf2_io_ops = {
+static struct b53_io_ops bcm_sf2_io_ops = {
        .read8  = bcm_sf2_core_read8,
        .read16 = bcm_sf2_core_read16,
        .read32 = bcm_sf2_core_read32,
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
new file mode 100644 (file)
index 0000000..7f3f178
--- /dev/null
@@ -0,0 +1,1060 @@
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/if_bridge.h>
+#include <linux/mdio.h>
+#include <linux/etherdevice.h>
+
+#include "qca8k.h"
+
+#define MIB_DESC(_s, _o, _n)   \
+       {                       \
+               .size = (_s),   \
+               .offset = (_o), \
+               .name = (_n),   \
+       }
+
+static const struct qca8k_mib_desc ar8327_mib[] = {
+       MIB_DESC(1, 0x00, "RxBroad"),
+       MIB_DESC(1, 0x04, "RxPause"),
+       MIB_DESC(1, 0x08, "RxMulti"),
+       MIB_DESC(1, 0x0c, "RxFcsErr"),
+       MIB_DESC(1, 0x10, "RxAlignErr"),
+       MIB_DESC(1, 0x14, "RxRunt"),
+       MIB_DESC(1, 0x18, "RxFragment"),
+       MIB_DESC(1, 0x1c, "Rx64Byte"),
+       MIB_DESC(1, 0x20, "Rx128Byte"),
+       MIB_DESC(1, 0x24, "Rx256Byte"),
+       MIB_DESC(1, 0x28, "Rx512Byte"),
+       MIB_DESC(1, 0x2c, "Rx1024Byte"),
+       MIB_DESC(1, 0x30, "Rx1518Byte"),
+       MIB_DESC(1, 0x34, "RxMaxByte"),
+       MIB_DESC(1, 0x38, "RxTooLong"),
+       MIB_DESC(2, 0x3c, "RxGoodByte"),
+       MIB_DESC(2, 0x44, "RxBadByte"),
+       MIB_DESC(1, 0x4c, "RxOverFlow"),
+       MIB_DESC(1, 0x50, "Filtered"),
+       MIB_DESC(1, 0x54, "TxBroad"),
+       MIB_DESC(1, 0x58, "TxPause"),
+       MIB_DESC(1, 0x5c, "TxMulti"),
+       MIB_DESC(1, 0x60, "TxUnderRun"),
+       MIB_DESC(1, 0x64, "Tx64Byte"),
+       MIB_DESC(1, 0x68, "Tx128Byte"),
+       MIB_DESC(1, 0x6c, "Tx256Byte"),
+       MIB_DESC(1, 0x70, "Tx512Byte"),
+       MIB_DESC(1, 0x74, "Tx1024Byte"),
+       MIB_DESC(1, 0x78, "Tx1518Byte"),
+       MIB_DESC(1, 0x7c, "TxMaxByte"),
+       MIB_DESC(1, 0x80, "TxOverSize"),
+       MIB_DESC(2, 0x84, "TxByte"),
+       MIB_DESC(1, 0x8c, "TxCollision"),
+       MIB_DESC(1, 0x90, "TxAbortCol"),
+       MIB_DESC(1, 0x94, "TxMultiCol"),
+       MIB_DESC(1, 0x98, "TxSingleCol"),
+       MIB_DESC(1, 0x9c, "TxExcDefer"),
+       MIB_DESC(1, 0xa0, "TxDefer"),
+       MIB_DESC(1, 0xa4, "TxLateCol"),
+};
+
+/* The 32-bit switch registers are accessed indirectly. To achieve this we
+ * need to set the register page first. Track the last page that was set to
+ * reduce the number of MDIO writes.
+ */
+static u16 qca8k_current_page = 0xffff;
+
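+/* A 32-bit register offset is split as follows: bits 5:2, shifted up by one,
+ * form the even MDIO register number r1; bits 8:6 form r2, which is ORed
+ * with 0x10 to build the pseudo PHY address; and bits 18:9 form the page
+ * written to PHY 0x18. For example, reg 0x660 maps to page 0x3 and PHY
+ * address 0x11, with the low/high 16 bits at MDIO registers 0x10/0x11.
+ */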
+static void
+qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
+{
+       regaddr >>= 1;
+       *r1 = regaddr & 0x1e;
+
+       regaddr >>= 5;
+       *r2 = regaddr & 0x7;
+
+       regaddr >>= 3;
+       *page = regaddr & 0x3ff;
+}
+
+static u32
+qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum)
+{
+       u32 val;
+       int ret;
+
+       ret = bus->read(bus, phy_id, regnum);
+       if (ret >= 0) {
+               val = ret;
+               ret = bus->read(bus, phy_id, regnum + 1);
+               val |= ret << 16;
+       }
+
+       if (ret < 0) {
+               dev_err_ratelimited(&bus->dev,
+                                   "failed to read qca8k 32bit register\n");
+               return ret;
+       }
+
+       return val;
+}
+
+static void
+qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
+{
+       u16 lo, hi;
+       int ret;
+
+       lo = val & 0xffff;
+       hi = (u16)(val >> 16);
+
+       ret = bus->write(bus, phy_id, regnum, lo);
+       if (ret >= 0)
+               ret = bus->write(bus, phy_id, regnum + 1, hi);
+       if (ret < 0)
+               dev_err_ratelimited(&bus->dev,
+                                   "failed to write qca8k 32bit register\n");
+}
+
+static void
+qca8k_set_page(struct mii_bus *bus, u16 page)
+{
+       if (page == qca8k_current_page)
+               return;
+
+       if (bus->write(bus, 0x18, 0, page) < 0)
+               dev_err_ratelimited(&bus->dev,
+                                   "failed to set qca8k page\n");
+       qca8k_current_page = page;
+}
+
+static u32
+qca8k_read(struct qca8k_priv *priv, u32 reg)
+{
+       u16 r1, r2, page;
+       u32 val;
+
+       qca8k_split_addr(reg, &r1, &r2, &page);
+
+       mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+       qca8k_set_page(priv->bus, page);
+       val = qca8k_mii_read32(priv->bus, 0x10 | r2, r1);
+
+       mutex_unlock(&priv->bus->mdio_lock);
+
+       return val;
+}
+
+static void
+qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+       u16 r1, r2, page;
+
+       qca8k_split_addr(reg, &r1, &r2, &page);
+
+       mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+       qca8k_set_page(priv->bus, page);
+       qca8k_mii_write32(priv->bus, 0x10 | r2, r1, val);
+
+       mutex_unlock(&priv->bus->mdio_lock);
+}
+
+static u32
+qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 val)
+{
+       u16 r1, r2, page;
+       u32 ret;
+
+       qca8k_split_addr(reg, &r1, &r2, &page);
+
+       mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+       qca8k_set_page(priv->bus, page);
+       ret = qca8k_mii_read32(priv->bus, 0x10 | r2, r1);
+       ret &= ~mask;
+       ret |= val;
+       qca8k_mii_write32(priv->bus, 0x10 | r2, r1, ret);
+
+       mutex_unlock(&priv->bus->mdio_lock);
+
+       return ret;
+}
+
+static void
+qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+       qca8k_rmw(priv, reg, 0, val);
+}
+
+static void
+qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+       qca8k_rmw(priv, reg, val, 0);
+}
+
+static int
+qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+
+       *val = qca8k_read(priv, reg);
+
+       return 0;
+}
+
+static int
+qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+
+       qca8k_write(priv, reg, val);
+
+       return 0;
+}
+
+static const struct regmap_range qca8k_readable_ranges[] = {
+       regmap_reg_range(0x0000, 0x00e4), /* Global control */
+       regmap_reg_range(0x0100, 0x0168), /* EEE control */
+       regmap_reg_range(0x0200, 0x0270), /* Parser control */
+       regmap_reg_range(0x0400, 0x0454), /* ACL */
+       regmap_reg_range(0x0600, 0x0718), /* Lookup */
+       regmap_reg_range(0x0800, 0x0b70), /* QM */
+       regmap_reg_range(0x0c00, 0x0c80), /* PKT */
+       regmap_reg_range(0x0e00, 0x0e98), /* L3 */
+       regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
+       regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
+       regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
+       regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
+       regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
+       regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
+       regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
+};
+
+static struct regmap_access_table qca8k_readable_table = {
+       .yes_ranges = qca8k_readable_ranges,
+       .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
+};
+
+static struct regmap_config qca8k_regmap_config = {
+       .reg_bits = 16,
+       .val_bits = 32,
+       .reg_stride = 4,
+       .max_register = 0x16ac, /* end MIB - Port6 range */
+       .reg_read = qca8k_regmap_read,
+       .reg_write = qca8k_regmap_write,
+       .rd_table = &qca8k_readable_table,
+};
+
+static int
+qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
+{
+       unsigned long timeout;
+
+       timeout = jiffies + msecs_to_jiffies(20);
+
+       /* loop until the busy flag has cleared */
+       do {
+               u32 val = qca8k_read(priv, reg);
+               int busy = val & mask;
+
+               if (!busy)
+                       break;
+               cond_resched();
+       } while (!time_after_eq(jiffies, timeout));
+
+       return time_after_eq(jiffies, timeout);
+}
+
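+/* An FDB/ARL entry spans bits 83:0 and is staged through the ATU_DATA0..2
+ * registers: MAC address in bits 47:0, port bitmap in 54:48, aging/status
+ * nibble in 67:64 and VID in 83:72. The helpers below only (de)serialise
+ * that layout; the table operation itself is triggered via ATU_FUNC in
+ * qca8k_fdb_access().
+ */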
+static void
+qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
+{
+       u32 reg[4];
+       int i;
+
+       /* load the ARL table into an array */
+       for (i = 0; i < 4; i++)
+               reg[i] = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4));
+
+       /* vid - 83:72 */
+       fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M;
+       /* aging - 67:64 */
+       fdb->aging = reg[2] & QCA8K_ATU_STATUS_M;
+       /* portmask - 54:48 */
+       fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M;
+       /* mac - 47:0 */
+       fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff;
+       fdb->mac[1] = reg[1] & 0xff;
+       fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff;
+       fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff;
+       fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff;
+       fdb->mac[5] = reg[0] & 0xff;
+}
+
+static void
+qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
+               u8 aging)
+{
+       u32 reg[3] = { 0 };
+       int i;
+
+       /* vid - 83:72 */
+       reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S;
+       /* aging - 67:64 */
+       reg[2] |= aging & QCA8K_ATU_STATUS_M;
+       /* portmask - 54:48 */
+       reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S;
+       /* mac - 47:0 */
+       reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S;
+       reg[1] |= mac[1];
+       reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S;
+       reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S;
+       reg[0] |= mac[4] << QCA8K_ATU_ADDR4_S;
+       reg[0] |= mac[5];
+
+       /* load the array into the ARL table */
+       for (i = 0; i < 3; i++)
+               qca8k_write(priv, QCA8K_REG_ATU_DATA0 + (i * 4), reg[i]);
+}
+
+static int
+qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
+{
+       u32 reg;
+
+       /* Set the command and FDB index */
+       reg = QCA8K_ATU_FUNC_BUSY;
+       reg |= cmd;
+       if (port >= 0) {
+               reg |= QCA8K_ATU_FUNC_PORT_EN;
+               reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S;
+       }
+
+       /* Write the function register triggering the table access */
+       qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
+
+       /* wait for completion */
+       if (qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY))
+               return -1;
+
+       /* Check for table full violation when adding an entry */
+       if (cmd == QCA8K_FDB_LOAD) {
+               reg = qca8k_read(priv, QCA8K_REG_ATU_FUNC);
+               if (reg & QCA8K_ATU_FUNC_FULL)
+                       return -1;
+       }
+
+       return 0;
+}
+
+static int
+qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
+{
+       int ret;
+
+       qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
+       ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
+       if (ret >= 0)
+               qca8k_fdb_read(priv, fdb);
+
+       return ret;
+}
+
+static int
+qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
+             u16 vid, u8 aging)
+{
+       int ret;
+
+       mutex_lock(&priv->reg_mutex);
+       qca8k_fdb_write(priv, vid, port_mask, mac, aging);
+       ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+       mutex_unlock(&priv->reg_mutex);
+
+       return ret;
+}
+
+static int
+qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
+{
+       int ret;
+
+       mutex_lock(&priv->reg_mutex);
+       qca8k_fdb_write(priv, vid, port_mask, mac, 0);
+       ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+       mutex_unlock(&priv->reg_mutex);
+
+       return ret;
+}
+
+static void
+qca8k_fdb_flush(struct qca8k_priv *priv)
+{
+       mutex_lock(&priv->reg_mutex);
+       qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
+       mutex_unlock(&priv->reg_mutex);
+}
+
+static void
+qca8k_mib_init(struct qca8k_priv *priv)
+{
+       mutex_lock(&priv->reg_mutex);
+       qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
+       qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
+       qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
+       qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
+       mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode)
+{
+       u32 reg;
+
+       switch (port) {
+       case 0:
+               reg = QCA8K_REG_PORT0_PAD_CTRL;
+               break;
+       case 6:
+               reg = QCA8K_REG_PORT6_PAD_CTRL;
+               break;
+       default:
+               pr_err("Can't set PAD_CTRL on port %d\n", port);
+               return -EINVAL;
+       }
+
+       /* Configure a port to be directly connected to an external
+        * PHY or MAC.
+        */
+       switch (mode) {
+       case PHY_INTERFACE_MODE_RGMII:
+               qca8k_write(priv, reg,
+                           QCA8K_PORT_PAD_RGMII_EN |
+                           QCA8K_PORT_PAD_RGMII_TX_DELAY(3) |
+                           QCA8K_PORT_PAD_RGMII_RX_DELAY(3));
+
+               /* According to the datasheet, RGMII delay is enabled through
+                * PORT5_PAD_CTRL for all ports, rather than individual port
+                * registers
+                */
+               qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
+                           QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
+               break;
+       case PHY_INTERFACE_MODE_SGMII:
+               qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
+               break;
+       default:
+               pr_err("xMII mode %d not supported\n", mode);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void
+qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
+{
+       u32 mask = QCA8K_PORT_STATUS_TXMAC;
+
+       /* Port 0 and 6 have no internal PHY */
+       if ((port > 0) && (port < 6))
+               mask |= QCA8K_PORT_STATUS_LINK_AUTO;
+
+       if (enable)
+               qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask);
+       else
+               qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
+}
+
+static int
+qca8k_setup(struct dsa_switch *ds)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+       int ret, i, phy_mode = -1;
+
+       /* Make sure that port 0 is the cpu port */
+       if (!dsa_is_cpu_port(ds, 0)) {
+               pr_err("port 0 is not the CPU port\n");
+               return -EINVAL;
+       }
+
+       mutex_init(&priv->reg_mutex);
+
+       /* Start by setting up the register mapping */
+       priv->regmap = devm_regmap_init(ds->dev, NULL, priv,
+                                       &qca8k_regmap_config);
+       if (IS_ERR(priv->regmap))
+               pr_warn("regmap initialization failed");
+
+       /* Initialize CPU port pad mode (xMII type, delays...) */
+       phy_mode = of_get_phy_mode(ds->ports[ds->dst->cpu_port].dn);
+       if (phy_mode < 0) {
+               pr_err("Can't find phy-mode for master device\n");
+               return phy_mode;
+       }
+       ret = qca8k_set_pad_ctrl(priv, QCA8K_CPU_PORT, phy_mode);
+       if (ret < 0)
+               return ret;
+
+       /* Enable CPU Port */
+       qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
+                     QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
+       qca8k_port_set_status(priv, QCA8K_CPU_PORT, 1);
+       priv->port_sts[QCA8K_CPU_PORT].enabled = 1;
+
+       /* Enable MIB counters */
+       qca8k_mib_init(priv);
+
+       /* Enable QCA header mode on the cpu port */
+       qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT),
+                   QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
+                   QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
+
+       /* Disable forwarding by default on all ports */
+       for (i = 0; i < QCA8K_NUM_PORTS; i++)
+               qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+                         QCA8K_PORT_LOOKUP_MEMBER, 0);
+
+       /* Disable MAC by default on all user ports */
+       for (i = 1; i < QCA8K_NUM_PORTS; i++)
+               if (ds->enabled_port_mask & BIT(i))
+                       qca8k_port_set_status(priv, i, 0);
+
+       /* Forward all unknown frames to CPU port for Linux processing */
+       qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
+                   BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
+                   BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
+                   BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
+                   BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
+
+       /* Setup connection between CPU port & user ports */
+       for (i = 0; i < DSA_MAX_PORTS; i++) {
+               /* CPU port gets connected to all user ports of the switch */
+               if (dsa_is_cpu_port(ds, i)) {
+                       qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
+                                 QCA8K_PORT_LOOKUP_MEMBER,
+                                 ds->enabled_port_mask);
+               }
+
+               /* Individual user ports get connected to the CPU port only */
+               if (ds->enabled_port_mask & BIT(i)) {
+                       int shift = 16 * (i % 2);
+
+                       qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+                                 QCA8K_PORT_LOOKUP_MEMBER,
+                                 BIT(QCA8K_CPU_PORT));
+
+                       /* Enable ARP Auto-learning by default */
+                       qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+                                     QCA8K_PORT_LOOKUP_LEARN);
+
+                       /* For port-based VLANs to work we need to set the
+                        * default egress VID
+                        */
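+                       /* QCA8K_EGRESS_VLAN() packs two ports per 32-bit
+                        * register, so the 16-bit default VID of port i sits
+                        * in the low or high half depending on i % 2, which
+                        * is the shift computed above.
+                        */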
+                       qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
+                                 0xffff << shift, 1 << shift);
+                       qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
+                                   QCA8K_PORT_VLAN_CVID(1) |
+                                   QCA8K_PORT_VLAN_SVID(1));
+               }
+       }
+
+       /* Flush the FDB table */
+       qca8k_fdb_flush(priv);
+
+       return 0;
+}
+
+static int
+qca8k_set_addr(struct dsa_switch *ds, u8 *addr)
+{
+       /* The subsystem always calls this function so add an empty stub */
+       return 0;
+}
+
+static int
+qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+       return mdiobus_read(priv->bus, phy, regnum);
+}
+
+static int
+qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+       return mdiobus_write(priv->bus, phy, regnum, val);
+}
+
+static void
+qca8k_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++)
+               strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
+                       ETH_GSTRING_LEN);
+}
+
+static void
+qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+                       uint64_t *data)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+       const struct qca8k_mib_desc *mib;
+       u32 reg, i;
+       u64 hi;
+
+       for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) {
+               mib = &ar8327_mib[i];
+               reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
+
+               data[i] = qca8k_read(priv, reg);
+               if (mib->size == 2) {
+                       hi = qca8k_read(priv, reg + 4);
+                       data[i] |= hi << 32;
+               }
+       }
+}
+
+static int
+qca8k_get_sset_count(struct dsa_switch *ds)
+{
+       return ARRAY_SIZE(ar8327_mib);
+}
+
+static void
+qca8k_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+       u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
+       u32 reg;
+
+       mutex_lock(&priv->reg_mutex);
+       reg = qca8k_read(priv, QCA8K_REG_EEE_CTRL);
+       if (enable)
+               reg |= lpi_en;
+       else
+               reg &= ~lpi_en;
+       qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
+       mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+qca8k_eee_init(struct dsa_switch *ds, int port,
+              struct phy_device *phy)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+       struct ethtool_eee *p = &priv->port_sts[port].eee;
+       int ret;
+
+       p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);
+
+       ret = phy_init_eee(phy, 0);
+       if (ret)
+               return ret;
+
+       qca8k_eee_enable_set(ds, port, true);
+
+       return 0;
+}
+
+static int
+qca8k_set_eee(struct dsa_switch *ds, int port,
+             struct phy_device *phydev,
+             struct ethtool_eee *e)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+       struct ethtool_eee *p = &priv->port_sts[port].eee;
+       int ret = 0;
+
+       p->eee_enabled = e->eee_enabled;
+
+       if (e->eee_enabled) {
+               p->eee_enabled = qca8k_eee_init(ds, port, phydev);
+               if (!p->eee_enabled)
+                       ret = -EOPNOTSUPP;
+       }
+       qca8k_eee_enable_set(ds, port, p->eee_enabled);
+
+       return ret;
+}
+
+static int
+qca8k_get_eee(struct dsa_switch *ds, int port,
+             struct ethtool_eee *e)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+       struct ethtool_eee *p = &priv->port_sts[port].eee;
+       struct net_device *netdev = ds->ports[port].netdev;
+       int ret;
+
+       ret = phy_ethtool_get_eee(netdev->phydev, p);
+       if (!ret)
+               e->eee_active =
+                       !!(p->supported & p->advertised & p->lp_advertised);
+       else
+               e->eee_active = 0;
+
+       e->eee_enabled = p->eee_enabled;
+
+       return ret;
+}
+
+static void
+qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+       u32 stp_state;
+
+       switch (state) {
+       case BR_STATE_DISABLED:
+               stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
+               break;
+       case BR_STATE_BLOCKING:
+               stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
+               break;
+       case BR_STATE_LISTENING:
+               stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
+               break;
+       case BR_STATE_LEARNING:
+               stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
+               break;
+       case BR_STATE_FORWARDING:
+       default:
+               stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
+               break;
+       }
+
+       qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+                 QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
+}
+
+static int
+qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+                      struct net_device *bridge)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+       int port_mask = BIT(QCA8K_CPU_PORT);
+       int i;
+
+       priv->port_sts[port].bridge_dev = bridge;
+
+       for (i = 1; i < QCA8K_NUM_PORTS; i++) {
+               if (priv->port_sts[i].bridge_dev != bridge)
+                       continue;
+               /* Add this port to the portvlan mask of the other ports
+                * in the bridge
+                */
+               qca8k_reg_set(priv,
+                             QCA8K_PORT_LOOKUP_CTRL(i),
+                             BIT(port));
+               if (i != port)
+                       port_mask |= BIT(i);
+       }
+       /* Add all other ports to this port's portvlan mask */
+       qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+                 QCA8K_PORT_LOOKUP_MEMBER, port_mask);
+
+       return 0;
+}
+
+static void
+qca8k_port_bridge_leave(struct dsa_switch *ds, int port)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+       int i;
+
+       for (i = 1; i < QCA8K_NUM_PORTS; i++) {
+               if (priv->port_sts[i].bridge_dev !=
+                   priv->port_sts[port].bridge_dev)
+                       continue;
+               /* Remove this port from the portvlan mask of the other ports
+                * in the bridge
+                */
+               qca8k_reg_clear(priv,
+                               QCA8K_PORT_LOOKUP_CTRL(i),
+                               BIT(port));
+       }
+       priv->port_sts[port].bridge_dev = NULL;
+       /* Set the cpu port to be the only one in the portvlan mask of
+        * this port
+        */
+       qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+                 QCA8K_PORT_LOOKUP_MEMBER, BIT(QCA8K_CPU_PORT));
+}
+
+static int
+qca8k_port_enable(struct dsa_switch *ds, int port,
+                 struct phy_device *phy)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+       qca8k_port_set_status(priv, port, 1);
+       priv->port_sts[port].enabled = 1;
+
+       return 0;
+}
+
+static void
+qca8k_port_disable(struct dsa_switch *ds, int port,
+                  struct phy_device *phy)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+       qca8k_port_set_status(priv, port, 0);
+       priv->port_sts[port].enabled = 0;
+}
+
+static int
+qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+                     u16 port_mask, u16 vid)
+{
+       /* Set the vid to the port vlan id if no vid is set */
+       if (!vid)
+               vid = 1;
+
+       return qca8k_fdb_add(priv, addr, port_mask, vid,
+                            QCA8K_ATU_STATUS_STATIC);
+}
+
+static int
+qca8k_port_fdb_prepare(struct dsa_switch *ds, int port,
+                      const struct switchdev_obj_port_fdb *fdb,
+                      struct switchdev_trans *trans)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+       /* The FDB table for static and auto-learned entries is the same. We
+        * need to reserve an entry with no port_mask set to make sure that
+        * when port_fdb_add is called an entry is still available. Otherwise
+        * the last free entry might have been used up by auto-learning.
+        */
+       return qca8k_port_fdb_insert(priv, fdb->addr, 0, fdb->vid);
+}
+
+static void
+qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+                  const struct switchdev_obj_port_fdb *fdb,
+                  struct switchdev_trans *trans)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+       u16 port_mask = BIT(port);
+
+       /* Update the FDB entry adding the port_mask */
+       qca8k_port_fdb_insert(priv, fdb->addr, port_mask, fdb->vid);
+}
+
+static int
+qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+                  const struct switchdev_obj_port_fdb *fdb)
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+       u16 port_mask = BIT(port);
+       u16 vid = fdb->vid;
+
+       if (!vid)
+               vid = 1;
+
+       return qca8k_fdb_del(priv, fdb->addr, port_mask, vid);
+}
+
+static int
+qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+                   struct switchdev_obj_port_fdb *fdb,
+                   int (*cb)(struct switchdev_obj *obj))
+{
+       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+       struct qca8k_fdb _fdb = { 0 };
+       int cnt = QCA8K_NUM_FDB_RECORDS;
+       int ret = 0;
+
+       mutex_lock(&priv->reg_mutex);
+       while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
+               if (!_fdb.aging)
+                       break;
+
+               ether_addr_copy(fdb->addr, _fdb.mac);
+               fdb->vid = _fdb.vid;
+               if (_fdb.aging == QCA8K_ATU_STATUS_STATIC)
+                       fdb->ndm_state = NUD_NOARP;
+               else
+                       fdb->ndm_state = NUD_REACHABLE;
+
+               ret = cb(&fdb->obj);
+               if (ret)
+                       break;
+       }
+       mutex_unlock(&priv->reg_mutex);
+
+       return 0;
+}
+
+static enum dsa_tag_protocol
+qca8k_get_tag_protocol(struct dsa_switch *ds)
+{
+       return DSA_TAG_PROTO_QCA;
+}
+
+static struct dsa_switch_ops qca8k_switch_ops = {
+       .get_tag_protocol       = qca8k_get_tag_protocol,
+       .setup                  = qca8k_setup,
+       .set_addr               = qca8k_set_addr,
+       .get_strings            = qca8k_get_strings,
+       .phy_read               = qca8k_phy_read,
+       .phy_write              = qca8k_phy_write,
+       .get_ethtool_stats      = qca8k_get_ethtool_stats,
+       .get_sset_count         = qca8k_get_sset_count,
+       .get_eee                = qca8k_get_eee,
+       .set_eee                = qca8k_set_eee,
+       .port_enable            = qca8k_port_enable,
+       .port_disable           = qca8k_port_disable,
+       .port_stp_state_set     = qca8k_port_stp_state_set,
+       .port_bridge_join       = qca8k_port_bridge_join,
+       .port_bridge_leave      = qca8k_port_bridge_leave,
+       .port_fdb_prepare       = qca8k_port_fdb_prepare,
+       .port_fdb_add           = qca8k_port_fdb_add,
+       .port_fdb_del           = qca8k_port_fdb_del,
+       .port_fdb_dump          = qca8k_port_fdb_dump,
+};
+
+static int
+qca8k_sw_probe(struct mdio_device *mdiodev)
+{
+       struct qca8k_priv *priv;
+       u32 id;
+
+       /* allocate the private data struct so that we can probe the switch's
+        * ID register
+        */
+       priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->bus = mdiodev->bus;
+
+       /* read the switch's ID register */
+       id = qca8k_read(priv, QCA8K_REG_MASK_CTRL);
+       id >>= QCA8K_MASK_CTRL_ID_S;
+       id &= QCA8K_MASK_CTRL_ID_M;
+       if (id != QCA8K_ID_QCA8337)
+               return -ENODEV;
+
+       priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
+       if (!priv->ds)
+               return -ENOMEM;
+
+       priv->ds->priv = priv;
+       priv->ds->dev = &mdiodev->dev;
+       priv->ds->ops = &qca8k_switch_ops;
+       mutex_init(&priv->reg_mutex);
+       dev_set_drvdata(&mdiodev->dev, priv);
+
+       return dsa_register_switch(priv->ds, priv->ds->dev->of_node);
+}
+
+static void
+qca8k_sw_remove(struct mdio_device *mdiodev)
+{
+       struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
+       int i;
+
+       for (i = 0; i < QCA8K_NUM_PORTS; i++)
+               qca8k_port_set_status(priv, i, 0);
+
+       dsa_unregister_switch(priv->ds);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void
+qca8k_set_pm(struct qca8k_priv *priv, int enable)
+{
+       int i;
+
+       for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+               if (!priv->port_sts[i].enabled)
+                       continue;
+
+               qca8k_port_set_status(priv, i, enable);
+       }
+}
+
+static int qca8k_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct qca8k_priv *priv = platform_get_drvdata(pdev);
+
+       qca8k_set_pm(priv, 0);
+
+       return dsa_switch_suspend(priv->ds);
+}
+
+static int qca8k_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct qca8k_priv *priv = platform_get_drvdata(pdev);
+
+       qca8k_set_pm(priv, 1);
+
+       return dsa_switch_resume(priv->ds);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
+                        qca8k_suspend, qca8k_resume);
+
+static const struct of_device_id qca8k_of_match[] = {
+       { .compatible = "qca,qca8337" },
+       { /* sentinel */ },
+};
+
+static struct mdio_driver qca8kmdio_driver = {
+       .probe  = qca8k_sw_probe,
+       .remove = qca8k_sw_remove,
+       .mdiodrv.driver = {
+               .name = "qca8k",
+               .of_match_table = qca8k_of_match,
+               .pm = &qca8k_pm_ops,
+       },
+};
+
+static int __init
+qca8kmdio_driver_register(void)
+{
+       return mdio_driver_register(&qca8kmdio_driver);
+}
+module_init(qca8kmdio_driver_register);
+
+static void __exit
+qca8kmdio_driver_unregister(void)
+{
+       mdio_driver_unregister(&qca8kmdio_driver);
+}
+module_exit(qca8kmdio_driver_unregister);
+
+MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
+MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qca8k");
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
new file mode 100644 (file)
index 0000000..2014647
--- /dev/null
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCA8K_H
+#define __QCA8K_H
+
+#include <linux/delay.h>
+#include <linux/regmap.h>
+
+#define QCA8K_NUM_PORTS                                        7
+
+#define PHY_ID_QCA8337                                 0x004dd036
+#define QCA8K_ID_QCA8337                               0x13
+
+#define QCA8K_NUM_FDB_RECORDS                          2048
+
+#define QCA8K_CPU_PORT                                 0
+
+/* Global control registers */
+#define QCA8K_REG_MASK_CTRL                            0x000
+#define   QCA8K_MASK_CTRL_ID_M                         0xff
+#define   QCA8K_MASK_CTRL_ID_S                         8
+#define QCA8K_REG_PORT0_PAD_CTRL                       0x004
+#define QCA8K_REG_PORT5_PAD_CTRL                       0x008
+#define QCA8K_REG_PORT6_PAD_CTRL                       0x00c
+#define   QCA8K_PORT_PAD_RGMII_EN                      BIT(26)
+#define   QCA8K_PORT_PAD_RGMII_TX_DELAY(x)             \
+                                               ((0x8 + (x & 0x3)) << 22)
+#define   QCA8K_PORT_PAD_RGMII_RX_DELAY(x)             \
+                                               ((0x10 + (x & 0x3)) << 20)
+#define   QCA8K_PORT_PAD_RGMII_RX_DELAY_EN             BIT(24)
+#define   QCA8K_PORT_PAD_SGMII_EN                      BIT(7)
+#define QCA8K_REG_MODULE_EN                            0x030
+#define   QCA8K_MODULE_EN_MIB                          BIT(0)
+#define QCA8K_REG_MIB                                  0x034
+#define   QCA8K_MIB_FLUSH                              BIT(24)
+#define   QCA8K_MIB_CPU_KEEP                           BIT(20)
+#define   QCA8K_MIB_BUSY                               BIT(17)
+#define QCA8K_GOL_MAC_ADDR0                            0x60
+#define QCA8K_GOL_MAC_ADDR1                            0x64
+#define QCA8K_REG_PORT_STATUS(_i)                      (0x07c + (_i) * 4)
+#define   QCA8K_PORT_STATUS_SPEED                      GENMASK(2, 0)
+#define   QCA8K_PORT_STATUS_SPEED_S                    0
+#define   QCA8K_PORT_STATUS_TXMAC                      BIT(2)
+#define   QCA8K_PORT_STATUS_RXMAC                      BIT(3)
+#define   QCA8K_PORT_STATUS_TXFLOW                     BIT(4)
+#define   QCA8K_PORT_STATUS_RXFLOW                     BIT(5)
+#define   QCA8K_PORT_STATUS_DUPLEX                     BIT(6)
+#define   QCA8K_PORT_STATUS_LINK_UP                    BIT(8)
+#define   QCA8K_PORT_STATUS_LINK_AUTO                  BIT(9)
+#define   QCA8K_PORT_STATUS_LINK_PAUSE                 BIT(10)
+#define QCA8K_REG_PORT_HDR_CTRL(_i)                    (0x9c + (_i * 4))
+#define   QCA8K_PORT_HDR_CTRL_RX_MASK                  GENMASK(3, 2)
+#define   QCA8K_PORT_HDR_CTRL_RX_S                     2
+#define   QCA8K_PORT_HDR_CTRL_TX_MASK                  GENMASK(1, 0)
+#define   QCA8K_PORT_HDR_CTRL_TX_S                     0
+#define   QCA8K_PORT_HDR_CTRL_ALL                      2
+#define   QCA8K_PORT_HDR_CTRL_MGMT                     1
+#define   QCA8K_PORT_HDR_CTRL_NONE                     0
+
+/* EEE control registers */
+#define QCA8K_REG_EEE_CTRL                             0x100
+#define  QCA8K_REG_EEE_CTRL_LPI_EN(_i)                 ((_i + 1) * 2)
+
+/* ACL registers */
+#define QCA8K_REG_PORT_VLAN_CTRL0(_i)                  (0x420 + (_i * 8))
+#define   QCA8K_PORT_VLAN_CVID(x)                      (x << 16)
+#define   QCA8K_PORT_VLAN_SVID(x)                      x
+#define QCA8K_REG_PORT_VLAN_CTRL1(_i)                  (0x424 + (_i * 8))
+#define QCA8K_REG_IPV4_PRI_BASE_ADDR                   0x470
+#define QCA8K_REG_IPV4_PRI_ADDR_MASK                   0x474
+
+/* Lookup registers */
+#define QCA8K_REG_ATU_DATA0                            0x600
+#define   QCA8K_ATU_ADDR2_S                            24
+#define   QCA8K_ATU_ADDR3_S                            16
+#define   QCA8K_ATU_ADDR4_S                            8
+#define QCA8K_REG_ATU_DATA1                            0x604
+#define   QCA8K_ATU_PORT_M                             0x7f
+#define   QCA8K_ATU_PORT_S                             16
+#define   QCA8K_ATU_ADDR0_S                            8
+#define QCA8K_REG_ATU_DATA2                            0x608
+#define   QCA8K_ATU_VID_M                              0xfff
+#define   QCA8K_ATU_VID_S                              8
+#define   QCA8K_ATU_STATUS_M                           0xf
+#define   QCA8K_ATU_STATUS_STATIC                      0xf
+#define QCA8K_REG_ATU_FUNC                             0x60c
+#define   QCA8K_ATU_FUNC_BUSY                          BIT(31)
+#define   QCA8K_ATU_FUNC_PORT_EN                       BIT(14)
+#define   QCA8K_ATU_FUNC_MULTI_EN                      BIT(13)
+#define   QCA8K_ATU_FUNC_FULL                          BIT(12)
+#define   QCA8K_ATU_FUNC_PORT_M                                0xf
+#define   QCA8K_ATU_FUNC_PORT_S                                8
+#define QCA8K_REG_GLOBAL_FW_CTRL0                      0x620
+#define   QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN            BIT(10)
+#define QCA8K_REG_GLOBAL_FW_CTRL1                      0x624
+#define   QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S              24
+#define   QCA8K_GLOBAL_FW_CTRL1_BC_DP_S                        16
+#define   QCA8K_GLOBAL_FW_CTRL1_MC_DP_S                        8
+#define   QCA8K_GLOBAL_FW_CTRL1_UC_DP_S                        0
+#define QCA8K_PORT_LOOKUP_CTRL(_i)                     (0x660 + (_i) * 0xc)
+#define   QCA8K_PORT_LOOKUP_MEMBER                     GENMASK(6, 0)
+#define   QCA8K_PORT_LOOKUP_STATE_MASK                 GENMASK(18, 16)
+#define   QCA8K_PORT_LOOKUP_STATE_DISABLED             (0 << 16)
+#define   QCA8K_PORT_LOOKUP_STATE_BLOCKING             (1 << 16)
+#define   QCA8K_PORT_LOOKUP_STATE_LISTENING            (2 << 16)
+#define   QCA8K_PORT_LOOKUP_STATE_LEARNING             (3 << 16)
+#define   QCA8K_PORT_LOOKUP_STATE_FORWARD              (4 << 16)
+#define   QCA8K_PORT_LOOKUP_STATE                      GENMASK(18, 16)
+#define   QCA8K_PORT_LOOKUP_LEARN                      BIT(20)
+
+/* Pkt edit registers */
+#define QCA8K_EGRESS_VLAN(x)                           (0x0c70 + (4 * (x / 2)))
+
+/* L3 registers */
+#define QCA8K_HROUTER_CONTROL                          0xe00
+#define   QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_M         GENMASK(17, 16)
+#define   QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_S         16
+#define   QCA8K_HROUTER_CONTROL_ARP_AGE_MODE           1
+#define QCA8K_HROUTER_PBASED_CONTROL1                  0xe08
+#define QCA8K_HROUTER_PBASED_CONTROL2                  0xe0c
+#define QCA8K_HNAT_CONTROL                             0xe38
+
+/* MIB registers */
+#define QCA8K_PORT_MIB_COUNTER(_i)                     (0x1000 + (_i) * 0x100)
+
+/* QCA specific MII registers */
+#define MII_ATH_MMD_ADDR                               0x0d
+#define MII_ATH_MMD_DATA                               0x0e
+
+enum {
+       QCA8K_PORT_SPEED_10M = 0,
+       QCA8K_PORT_SPEED_100M = 1,
+       QCA8K_PORT_SPEED_1000M = 2,
+       QCA8K_PORT_SPEED_ERR = 3,
+};
+
+enum qca8k_fdb_cmd {
+       QCA8K_FDB_FLUSH = 1,
+       QCA8K_FDB_LOAD = 2,
+       QCA8K_FDB_PURGE = 3,
+       QCA8K_FDB_NEXT = 6,
+       QCA8K_FDB_SEARCH = 7,
+};
+
+struct ar8xxx_port_status {
+       struct ethtool_eee eee;
+       struct net_device *bridge_dev;
+       int enabled;
+};
+
+struct qca8k_priv {
+       struct regmap *regmap;
+       struct mii_bus *bus;
+       struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
+       struct dsa_switch *ds;
+       struct mutex reg_mutex;
+};
+
+struct qca8k_mib_desc {
+       unsigned int size;
+       unsigned int offset;
+       const char *name;
+};
+
+struct qca8k_fdb {
+       u16 vid;
+       u8 port_mask;
+       u8 aging;
+       u8 mac[6];
+};
+
+#endif /* __QCA8K_H */
index 25c55ab..9133e79 100644 (file)
@@ -3089,7 +3089,7 @@ static void set_rx_mode(struct net_device *dev)
        iowrite16(new_mode, ioaddr + EL3_CMD);
 }
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
 /* Setup the card so that it can receive frames with an 802.1q VLAN tag.
    Note that this must be done after each RxReset due to some backwards
    compatibility logic in the Cyclone and Tornado ASICs */
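
This and several hunks below apply the same substitution: IS_ENABLED(CONFIG_FOO) from <linux/kconfig.h> is true when the option is built-in (=y) or modular (=m), replacing the open-coded pair of defined() tests. A generic sketch (CONFIG_FOO and foo_setup() are placeholders):

	#include <linux/kconfig.h>

	#if IS_ENABLED(CONFIG_FOO)		/* CONFIG_FOO=y or CONFIG_FOO=m */
	void foo_setup(void);			/* real implementation is built */
	#else
	static inline void foo_setup(void) { }	/* compiled out otherwise */
	#endif
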
index 1d10696..8af2c88 100644 (file)
@@ -66,7 +66,7 @@
  */
 #define ZEROCOPY
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
 #define VLAN_SUPPORT
 #endif
 
index dcf2a1f..dc57f27 100644 (file)
 #define WRITERDP(lp, x)        out_be16(lp->base + LANCE_RDP, (x))
 #define READRDP(lp)    in_be16(lp->base + LANCE_RDP)
 
-#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE)
+#if IS_ENABLED(CONFIG_HPLANCE)
 #include "hplance.h"
 
 #undef WRITERAP
 #undef WRITERDP
 #undef READRDP
 
-#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE)
+#if IS_ENABLED(CONFIG_MVME147_NET)
 
 /* Lossage Factor Nine, Mr Sulu. */
 #define WRITERAP(lp, x)        (lp->writerap(lp, x))
@@ -86,7 +86,7 @@ static inline __u16 READRDP(struct lance_private *lp)
 }
 
 #endif
-#endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */
+#endif /* IS_ENABLED(CONFIG_HPLANCE) */
 
 /* debugging output macros, various flavours */
 /* #define TEST_HITS */
index 9496005..f92cc97 100644 (file)
@@ -89,7 +89,7 @@ Revision History:
 #include <asm/byteorder.h>
 #include <asm/uaccess.h>
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
 #define AMD8111E_VLAN_TAG_USED 1
 #else
 #define AMD8111E_VLAN_TAG_USED 0
index 22a7b26..d372d42 100644 (file)
@@ -54,55 +54,68 @@ static void xgene_get_drvinfo(struct net_device *ndev,
        sprintf(info->bus_info, "%s", pdev->name);
 }
 
-static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+static int xgene_get_link_ksettings(struct net_device *ndev,
+                                   struct ethtool_link_ksettings *cmd)
 {
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
-       struct phy_device *phydev = pdata->phy_dev;
+       struct phy_device *phydev = ndev->phydev;
+       u32 supported;
 
        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
                if (phydev == NULL)
                        return -ENODEV;
 
-               return phy_ethtool_gset(phydev, cmd);
+               return phy_ethtool_ksettings_get(phydev, cmd);
        } else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
                if (pdata->mdio_driver) {
                        if (!phydev)
                                return -ENODEV;
 
-                       return phy_ethtool_gset(phydev, cmd);
+                       return phy_ethtool_ksettings_get(phydev, cmd);
                }
 
-               cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
-                                SUPPORTED_MII;
-               cmd->advertising = cmd->supported;
-               ethtool_cmd_speed_set(cmd, SPEED_1000);
-               cmd->duplex = DUPLEX_FULL;
-               cmd->port = PORT_MII;
-               cmd->transceiver = XCVR_INTERNAL;
-               cmd->autoneg = AUTONEG_ENABLE;
+               supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+                       SUPPORTED_MII;
+               ethtool_convert_legacy_u32_to_link_mode(
+                       cmd->link_modes.supported,
+                       supported);
+               ethtool_convert_legacy_u32_to_link_mode(
+                       cmd->link_modes.advertising,
+                       supported);
+
+               cmd->base.speed = SPEED_1000;
+               cmd->base.duplex = DUPLEX_FULL;
+               cmd->base.port = PORT_MII;
+               cmd->base.autoneg = AUTONEG_ENABLE;
        } else {
-               cmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
-               cmd->advertising = cmd->supported;
-               ethtool_cmd_speed_set(cmd, SPEED_10000);
-               cmd->duplex = DUPLEX_FULL;
-               cmd->port = PORT_FIBRE;
-               cmd->transceiver = XCVR_INTERNAL;
-               cmd->autoneg = AUTONEG_DISABLE;
+               supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
+               ethtool_convert_legacy_u32_to_link_mode(
+                       cmd->link_modes.supported,
+                       supported);
+               ethtool_convert_legacy_u32_to_link_mode(
+                       cmd->link_modes.advertising,
+                       supported);
+
+               cmd->base.speed = SPEED_10000;
+               cmd->base.duplex = DUPLEX_FULL;
+               cmd->base.port = PORT_FIBRE;
+               cmd->base.autoneg = AUTONEG_DISABLE;
        }
 
        return 0;
 }
 
-static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+static int xgene_set_link_ksettings(struct net_device *ndev,
+                                   const struct ethtool_link_ksettings *cmd)
 {
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
-       struct phy_device *phydev = pdata->phy_dev;
+       struct phy_device *phydev = ndev->phydev;
 
        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
                if (!phydev)
                        return -ENODEV;
 
-               return phy_ethtool_sset(phydev, cmd);
+               return phy_ethtool_ksettings_set(phydev, cmd);
        }
 
        if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
@@ -110,7 +123,7 @@ static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
                        if (!phydev)
                                return -ENODEV;
 
-                       return phy_ethtool_sset(phydev, cmd);
+                       return phy_ethtool_ksettings_set(phydev, cmd);
                }
        }
 
@@ -152,12 +165,12 @@ static void xgene_get_ethtool_stats(struct net_device *ndev,
 
 static const struct ethtool_ops xgene_ethtool_ops = {
        .get_drvinfo = xgene_get_drvinfo,
-       .get_settings = xgene_get_settings,
-       .set_settings = xgene_set_settings,
        .get_link = ethtool_op_get_link,
        .get_strings = xgene_get_strings,
        .get_sset_count = xgene_get_sset_count,
-       .get_ethtool_stats = xgene_get_ethtool_stats
+       .get_ethtool_stats = xgene_get_ethtool_stats,
+       .get_link_ksettings = xgene_get_link_ksettings,
+       .set_link_ksettings = xgene_set_link_ksettings,
 };
 
 void xgene_enet_set_ethtool_ops(struct net_device *ndev)
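The get_settings/set_settings to {get,set}_link_ksettings conversion above moves the legacy u32 SUPPORTED_*/ADVERTISED_* masks into per-mode bitmaps. A rough userspace sketch of what ethtool_convert_legacy_u32_to_link_mode() does for the first 32 modes (the word count and demo bit numbers below are illustrative, not the real ethtool definitions):

#include <stdint.h>
#include <stdio.h>

#define DEMO_LINK_MODE_WORDS 2                  /* enough words for this demo */
#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Copy each set bit of the legacy 32-bit mask into the new bitmap. */
static void legacy_u32_to_link_mode(unsigned long *dst, uint32_t legacy)
{
        for (unsigned int w = 0; w < DEMO_LINK_MODE_WORDS; w++)
                dst[w] = 0;
        for (unsigned int bit = 0; bit < 32; bit++)
                if (legacy & (1u << bit))
                        dst[bit / BITS_PER_LONG] |= 1ul << (bit % BITS_PER_LONG);
}

int main(void)
{
        unsigned long supported[DEMO_LINK_MODE_WORDS];
        uint32_t legacy = (1u << 5) | (1u << 6) | (1u << 9);    /* demo bits */

        legacy_u32_to_link_mode(supported, legacy);
        printf("word0 = %#lx\n", supported[0]);                 /* 0x260 */
        return 0;
}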
index da413c8..c481f10 100644 (file)
@@ -713,7 +713,7 @@ static void xgene_enet_adjust_link(struct net_device *ndev)
 {
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
-       struct phy_device *phydev = pdata->phy_dev;
+       struct phy_device *phydev = ndev->phydev;
 
        if (phydev->link) {
                if (pdata->phy_speed != phydev->speed) {
@@ -773,15 +773,13 @@ int xgene_enet_phy_connect(struct net_device *ndev)
                        netdev_err(ndev, "Could not connect to PHY\n");
                        return -ENODEV;
                }
-
-               pdata->phy_dev = phy_dev;
        } else {
 #ifdef CONFIG_ACPI
                struct acpi_device *adev = acpi_phy_find_device(dev);
                if (adev)
-                       pdata->phy_dev =  adev->driver_data;
-
-               phy_dev = pdata->phy_dev;
+                       phy_dev = adev->driver_data;
+               else
+                       phy_dev = NULL;
 
                if (!phy_dev ||
                    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
@@ -849,8 +847,6 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
        if (!phy)
                return -EIO;
 
-       pdata->phy_dev = phy;
-
        return ret;
 }
 
@@ -890,14 +886,18 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
 
 void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata)
 {
-       if (pdata->phy_dev)
-               phy_disconnect(pdata->phy_dev);
+       struct net_device *ndev = pdata->ndev;
+
+       if (ndev->phydev)
+               phy_disconnect(ndev->phydev);
 }
 
 void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
 {
-       if (pdata->phy_dev)
-               phy_disconnect(pdata->phy_dev);
+       struct net_device *ndev = pdata->ndev;
+
+       if (ndev->phydev)
+               phy_disconnect(ndev->phydev);
 
        mdiobus_unregister(pdata->mdio_bus);
        mdiobus_free(pdata->mdio_bus);
index b8b9495..522ba92 100644 (file)
@@ -748,8 +748,8 @@ static int xgene_enet_open(struct net_device *ndev)
        if (ret)
                return ret;
 
-       if (pdata->phy_dev) {
-               phy_start(pdata->phy_dev);
+       if (ndev->phydev) {
+               phy_start(ndev->phydev);
        } else {
                schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
                netif_carrier_off(ndev);
@@ -772,8 +772,8 @@ static int xgene_enet_close(struct net_device *ndev)
        mac_ops->tx_disable(pdata);
        mac_ops->rx_disable(pdata);
 
-       if (pdata->phy_dev)
-               phy_stop(pdata->phy_dev);
+       if (ndev->phydev)
+               phy_stop(ndev->phydev);
        else
                cancel_delayed_work_sync(&pdata->link_work);
 
index b339fc1..7735371 100644 (file)
@@ -174,7 +174,6 @@ struct xgene_cle_ops {
 struct xgene_enet_pdata {
        struct net_device *ndev;
        struct mii_bus *mdio_bus;
-       struct phy_device *phy_dev;
        int phy_speed;
        struct clk *clk;
        struct platform_device *pdev;
index 9887cee..c0f84b7 100644 (file)
@@ -1028,7 +1028,7 @@ static int __alx_open(struct alx_priv *alx, bool resume)
 
        err = alx_alloc_rings(alx);
        if (err)
-               return err;
+               goto out_disable_adv_intr;
 
        alx_configure(alx);
 
@@ -1049,6 +1049,8 @@ static int __alx_open(struct alx_priv *alx, bool resume)
 
 out_free_rings:
        alx_free_rings(alx);
+out_disable_adv_intr:
+       alx_disable_advanced_intr(alx);
        return err;
 }
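The __alx_open() change above is a classic error-unwind fix: the ring-allocation failure path used to return immediately and leave the advanced interrupts enabled. A generic, hypothetical sketch of the goto-unwind shape the fix restores (stub functions, not the alx driver's API):

#include <stdio.h>

static int enable_intr(void)   { puts("enable intr");  return 0; }
static void disable_intr(void) { puts("disable intr"); }
static int alloc_rings(void)   { puts("alloc rings");  return -12; } /* pretend this fails */

static int demo_open(void)
{
        int err;

        err = enable_intr();
        if (err)
                return err;

        err = alloc_rings();
        if (err)
                goto out_disable_intr;  /* the fix: don't just 'return err' here */

        return 0;

out_disable_intr:
        disable_intr();                 /* undo only what already succeeded */
        return err;
}

int main(void)
{
        return demo_open() ? 1 : 0;
}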
 
index 8fc3f3c..ecd357d 100644 (file)
@@ -50,7 +50,7 @@
 #include <linux/log2.h>
 #include <linux/aer.h>
 
-#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#if IS_ENABLED(CONFIG_CNIC)
 #define BCM_CNIC 1
 #include "cnic_if.h"
 #endif
index 6f9104b..dab61a8 100644 (file)
@@ -772,6 +772,11 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
                (bp->common.bc_ver & 0xff00) >> 8,
                (bp->common.bc_ver & 0xff));
 
+       if (pci_channel_offline(bp->pdev)) {
+               BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
+               return;
+       }
+
        val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
        if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
                BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
@@ -9415,10 +9420,16 @@ unload_error:
        /* Release IRQs */
        bnx2x_free_irq(bp);
 
-       /* Reset the chip */
-       rc = bnx2x_reset_hw(bp, reset_code);
-       if (rc)
-               BNX2X_ERR("HW_RESET failed\n");
+       /* Reset the chip, unless the PCI function is offline. If we reach
+        * this point following PCI error handling, the device is really in
+        * a bad state and we're about to remove it, so resetting the chip
+        * is not a good idea.
+        */
+       if (!pci_channel_offline(bp->pdev)) {
+               rc = bnx2x_reset_hw(bp, reset_code);
+               if (rc)
+                       BNX2X_ERR("HW_RESET failed\n");
+       }
 
        /* Report UNLOAD_DONE to MCP */
        bnx2x_send_unload_done(bp, keep_link);
index 2cf7910..228c964 100644 (file)
@@ -353,8 +353,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
                        __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
-                       __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
-                                        push_len - 16);
+                       __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
+                                        (push_len - 16) << 1);
                } else {
                        __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
                                         push_len);
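The bnxt doorbell-push fix above hinges on the units of the count argument: __iowrite64_copy() copies 8-byte words while __iowrite32_copy() copies 4-byte words, so the remainder beyond the first 16 quadwords has to be doubled when it is written with the 32-bit variant. The arithmetic as a standalone check (no MMIO involved):

#include <assert.h>

int main(void)
{
        int push_len    = 20;                   /* total length in 64-bit units */
        int rest_qwords = push_len - 16;        /* what the old call passed     */
        int rest_dwords = (push_len - 16) << 1; /* what the fixed call passes   */

        /* same number of bytes either way, but only the second value matches
         * the 4-byte unit that __iowrite32_copy() expects */
        assert(rest_qwords * 8 == rest_dwords * 4);
        return 0;
}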
index 6592612..a2551bc 100644 (file)
@@ -14012,6 +14012,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
        if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
            (!ec->rx_coalesce_usecs) ||
            (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
+           (!ec->tx_coalesce_usecs) ||
            (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
            (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
            (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
@@ -14022,16 +14023,6 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
            (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
                return -EINVAL;
 
-       /* No rx interrupts will be generated if both are zero */
-       if ((ec->rx_coalesce_usecs == 0) &&
-           (ec->rx_max_coalesced_frames == 0))
-               return -EINVAL;
-
-       /* No tx interrupts will be generated if both are zero */
-       if ((ec->tx_coalesce_usecs == 0) &&
-           (ec->tx_max_coalesced_frames == 0))
-               return -EINVAL;
-
        /* Only copy relevant parameters, ignore all others. */
        tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
        tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
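After this tg3 change both rx_coalesce_usecs and tx_coalesce_usecs must be non-zero (the rx test was already present as a context line; the tx test is new), which is why the separate "usecs and max-frames are both zero" rejections could be dropped as redundant. A compact model of the resulting validation, with made-up limits:

#include <assert.h>
#include <stdbool.h>

#define DEMO_MAX_RX_USECS 1023
#define DEMO_MAX_TX_USECS 1023

static bool coalesce_params_ok(unsigned int rx_usecs, unsigned int tx_usecs)
{
        if (!rx_usecs || rx_usecs > DEMO_MAX_RX_USECS)
                return false;
        if (!tx_usecs || tx_usecs > DEMO_MAX_TX_USECS)
                return false;
        return true;
}

int main(void)
{
        assert(coalesce_params_ok(1, 1));
        assert(!coalesce_params_ok(1, 0));      /* newly rejected: tx usecs 0 */
        assert(!coalesce_params_ok(0, 1));      /* already rejected before    */
        return 0;
}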
index 3256839..63144bb 100644 (file)
@@ -1339,6 +1339,24 @@ dma_error:
        return 0;
 }
 
+static inline int macb_clear_csum(struct sk_buff *skb)
+{
+       /* no change for packets without checksum offloading */
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0;
+
+       /* make sure we can modify the header */
+       if (unlikely(skb_cow_head(skb, 0)))
+               return -1;
+
+       /* Initialize the checksum field.
+        * This is required - at least for Zynq, which otherwise calculates
+        * wrong UDP header checksums for UDP packets with UDP data len <= 2.
+        */
+       *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
+       return 0;
+}
+
 static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        u16 queue_index = skb_get_queue_mapping(skb);
@@ -1378,6 +1396,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
        }
 
+       if (macb_clear_csum(skb)) {
+               dev_kfree_skb_any(skb);
+               goto unlock;
+       }
+
        /* Map socket buffer for DMA transfer */
        if (!macb_tx_map(bp, queue, skb)) {
                dev_kfree_skb_any(skb);
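macb_clear_csum() above relies on the usual CHECKSUM_PARTIAL layout: the checksum field of the outgoing packet sits at skb_checksum_start() plus skb->csum_offset, and the driver simply writes zero there before handing the frame to the controller. A tiny userspace model of that offset arithmetic (assumed header sizes, not the real sk_buff):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        uint8_t frame[64];
        size_t csum_start  = 14 + 20;   /* Ethernet + IPv4 -> start of UDP header */
        size_t csum_offset = 6;         /* UDP checksum is 6 bytes into the header */
        uint16_t csum;

        memset(frame, 0xff, sizeof(frame));             /* pretend stale contents */
        memset(frame + csum_start + csum_offset, 0, 2); /* what the fix does      */

        memcpy(&csum, frame + csum_start + csum_offset, sizeof(csum));
        assert(csum == 0);
        return 0;
}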
index dd63f96..18d12d3 100644 (file)
@@ -258,6 +258,7 @@ struct nicvf {
        u8                      sqs_id;
        bool                    sqs_mode;
        bool                    hw_tso;
+       bool                    t88;
 
        /* Receive buffer alloc */
        u32                     rb_page_offset;
index 25618d2..2bbf4cb 100644 (file)
@@ -282,9 +282,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
        u16 sdevid;
        u64 lmac_cfg;
 
-       /* Max value that can be set is 60 */
-       if (size > 60)
-               size = 60;
+       /* There is an issue in HW wherein, while sending GSO sized pkts
+        * as part of TSO, if the pkt len falls below this size the NIC
+        * zero pads the packet and also updates the IP total length.
+        * Hence set this value to less than the min size of MAC+IP+TCP
+        * headers; BGX will do the padding to transmit a 64 byte pkt.
+        */
+       if (size > 52)
+               size = 52;
 
        pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
        /* 81xx's RGX has only one LMAC */
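The new 52-byte cap above is chosen to sit below the smallest possible MAC+IP+TCP header block, so the NIC's zero-padding (and its IP total-length rewrite) can never hit a real TSO segment, while BGX still pads the frame on the wire. The arithmetic, as a standalone check using the usual minimum header sizes:

#include <assert.h>

int main(void)
{
        const int eth_hdr = 14, ipv4_hdr = 20, tcp_hdr = 20;
        const int pad_threshold = 52;
        const int wire_min = 64;        /* BGX pads transmitted frames to this */

        assert(pad_threshold < eth_hdr + ipv4_hdr + tcp_hdr);  /* 52 < 54 */
        assert(pad_threshold < wire_min);
        return 0;
}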
index 06c014e..7d00162 100644 (file)
@@ -522,6 +522,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
        struct nicvf *nic = netdev_priv(netdev);
        struct snd_queue *sq;
        struct sq_hdr_subdesc *hdr;
+       struct sq_hdr_subdesc *tso_sqe;
 
        sq = &nic->qs->sq[cqe_tx->sq_idx];
 
@@ -536,17 +537,21 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
 
        nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
        skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
-       /* For TSO offloaded packets only one SQE will have a valid SKB */
        if (skb) {
+               /* Check for dummy descriptor used for HW TSO offload on 88xx */
+               if (hdr->dont_send) {
+                       /* Get actual TSO descriptors and free them */
+                       tso_sqe =
+                        (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+                       nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+               }
                nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
                prefetch(skb);
                napi_consume_skb(skb, budget);
                sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
        } else {
-               /* In case of HW TSO, HW sends a CQE for each segment of a TSO
-                * packet instead of a single CQE for the whole TSO packet
-                * transmitted. Each of this CQE points to the same SQE, so
-                * avoid freeing same SQE multiple times.
+               /* In case of SW TSO on 88xx, only the last segment will
+                * have an SKB attached, so just free the SQEs here.
                 */
                if (!nic->hw_tso)
                        nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
@@ -1516,6 +1521,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct net_device *netdev;
        struct nicvf *nic;
        int    err, qcount;
+       u16    sdevid;
 
        err = pci_enable_device(pdev);
        if (err) {
@@ -1588,6 +1594,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!pass1_silicon(nic->pdev))
                nic->hw_tso = true;
 
+       pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+       if (sdevid == 0xA134)
+               nic->t88 = true;
+
        /* Check if this VF is in QS only mode */
        if (nic->sqs_mode)
                return 0;
index 7d90856..178c5c7 100644 (file)
@@ -953,6 +953,8 @@ static int nicvf_tso_count_subdescs(struct sk_buff *skb)
        return num_edescs + sh->gso_segs;
 }
 
+#define POST_CQE_DESC_COUNT 2
+
 /* Get the number of SQ descriptors needed to xmit this skb */
 static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
 {
@@ -963,6 +965,10 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
                return subdesc_cnt;
        }
 
+       /* Dummy descriptors to get TSO pkt completion notification */
+       if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
+               subdesc_cnt += POST_CQE_DESC_COUNT;
+
        if (skb_shinfo(skb)->nr_frags)
                subdesc_cnt += skb_shinfo(skb)->nr_frags;
 
@@ -980,14 +986,21 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
        struct sq_hdr_subdesc *hdr;
 
        hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
-       sq->skbuff[qentry] = (u64)skb;
-
        memset(hdr, 0, SND_QUEUE_DESC_SIZE);
        hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
-       /* Enable notification via CQE after processing SQE */
-       hdr->post_cqe = 1;
-       /* No of subdescriptors following this */
-       hdr->subdesc_cnt = subdesc_cnt;
+
+       if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
+               /* post_cqe = 0, to avoid HW posting a CQE for every TSO
+                * segment transmitted on 88xx.
+                */
+               hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
+       } else {
+               sq->skbuff[qentry] = (u64)skb;
+               /* Enable notification via CQE after processing SQE */
+               hdr->post_cqe = 1;
+               /* No of subdescriptors following this */
+               hdr->subdesc_cnt = subdesc_cnt;
+       }
        hdr->tot_len = len;
 
        /* Offload checksum calculation to HW */
@@ -1038,6 +1051,37 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
        gather->addr = data;
 }
 
+/* Add HDR + IMMEDIATE subdescriptors right after the descriptors of a TSO
+ * packet so that a CQE is posted as a notification for transmission of
+ * the TSO packet.
+ */
+static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
+                                           int tso_sqe, struct sk_buff *skb)
+{
+       struct sq_imm_subdesc *imm;
+       struct sq_hdr_subdesc *hdr;
+
+       sq->skbuff[qentry] = (u64)skb;
+
+       hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+       memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+       hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+       /* Enable notification via CQE after processing SQE */
+       hdr->post_cqe = 1;
+       /* There is no packet to transmit here */
+       hdr->dont_send = 1;
+       hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
+       hdr->tot_len = 1;
+       /* Actual TSO header SQE index, needed for cleanup */
+       hdr->rsvd2 = tso_sqe;
+
+       qentry = nicvf_get_nxt_sqentry(sq, qentry);
+       imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
+       memset(imm, 0, SND_QUEUE_DESC_SIZE);
+       imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
+       imm->len = 1;
+}
+
 /* Segment a TSO packet into 'gso_size' segments and append
  * them to SQ for transfer
  */
@@ -1111,7 +1155,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
 int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
 {
        int i, size;
-       int subdesc_cnt;
+       int subdesc_cnt, tso_sqe = 0;
        int sq_num, qentry;
        struct queue_set *qs;
        struct snd_queue *sq;
@@ -1146,6 +1190,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
        /* Add SQ header subdesc */
        nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
                                 skb, skb->len);
+       tso_sqe = qentry;
 
        /* Add SQ gather subdescs */
        qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1169,6 +1214,11 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
        }
 
 doorbell:
+       if (nic->t88 && skb_shinfo(skb)->gso_size) {
+               qentry = nicvf_get_nxt_sqentry(sq, qentry);
+               nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
+       }
+
        /* make sure all memory stores are done before ringing doorbell */
        smp_wmb();
 
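Taken together, the queue changes above reserve POST_CQE_DESC_COUNT extra SQEs per 88xx HW-TSO packet: the real header descriptor stops requesting a CQE, and a trailing dont_send header plus an immediate descriptor requests the single completion and records where the real TSO group starts. A small bookkeeping sketch (assumed counts, not driver code) showing that the completion handler frees exactly what was reserved:

#include <assert.h>

#define POST_CQE_DESC_COUNT 2

int main(void)
{
        int reserved = 5 + POST_CQE_DESC_COUNT; /* e.g. hdr + 4 gathers + pair */

        /* real TSO header SQE: advertises everything except the trailing pair
         * (the driver passes subdesc_cnt - 1 and then subtracts the pair) */
        int tso_group   = (reserved - 1) - POST_CQE_DESC_COUNT;
        /* trailing dont_send header: advertises just the immediate descriptor */
        int dummy_group = POST_CQE_DESC_COUNT - 1;

        /* completion path puts tso_group + 1 and dummy_group + 1 descriptors */
        int freed = (tso_group + 1) + (dummy_group + 1);

        assert(freed == reserved);
        return 0;
}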
index c8fd4f8..f3ed9ce 100644 (file)
@@ -1648,14 +1648,15 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 
        if (csum_ok && !pkt->err_vec &&
            (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
-               if (!pkt->ip_frag)
+               if (!pkt->ip_frag) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
-               else {
+                       rxq->stats.rx_cso++;
+               } else if (pkt->l2info & htonl(RXF_IP_F)) {
                        __sum16 c = (__force __sum16)pkt->csum;
                        skb->csum = csum_unfold(c);
                        skb->ip_summed = CHECKSUM_COMPLETE;
+                       rxq->stats.rx_cso++;
                }
-               rxq->stats.rx_cso++;
        } else
                skb_checksum_none_assert(skb);
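For the cxgb4vf RX path above, the two checksum outcomes differ in who finishes the work: CHECKSUM_UNNECESSARY means the NIC already verified the TCP/UDP checksum, while CHECKSUM_COMPLETE hands the stack the hardware's folded ones-complement sum (csum_unfold() merely widens the 16-bit value back to 32 bits). A standalone refresher on that folding arithmetic, with a made-up accumulator value:

#include <assert.h>
#include <stdint.h>

/* Fold a 32-bit ones-complement accumulator down to a 16-bit checksum. */
static uint16_t csum_fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        uint32_t payload_sum = 0x1a2b3 + 0x4c5d;        /* pretend accumulated sum */
        uint16_t csum_field  = csum_fold(payload_sum);  /* what goes on the wire   */

        /* Verification: summing the payload plus its checksum folds to zero,
         * which is the kind of arithmetic the stack finishes when a driver
         * reports CHECKSUM_COMPLETE. */
        assert(csum_fold(payload_sum + csum_field) == 0);
        return 0;
}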
 
index 2362230..2534e30 100644 (file)
@@ -1,3 +1,5 @@
+ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+
 obj-$(CONFIG_CHELSIO_LIB) += libcxgb.o
 
-libcxgb-y := libcxgb_ppm.o
+libcxgb-y := libcxgb_ppm.o libcxgb_cm.o
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
new file mode 100644 (file)
index 0000000..0f0de5b
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials
+ *       provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <net/route.h>
+#include <net/ip6_route.h>
+
+#include "libcxgb_cm.h"
+
+void
+cxgb_get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type,
+               int *iptype, __u8 *local_ip, __u8 *peer_ip,
+               __be16 *local_port, __be16 *peer_port)
+{
+       int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
+                     ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
+                     T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+       int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
+                    IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
+                    T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+       struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
+       struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
+       struct tcphdr *tcp = (struct tcphdr *)
+                            ((u8 *)(req + 1) + eth_len + ip_len);
+
+       if (ip->version == 4) {
+               pr_debug("%s saddr 0x%x daddr 0x%x sport %u dport %u\n",
+                        __func__, ntohl(ip->saddr), ntohl(ip->daddr),
+                        ntohs(tcp->source), ntohs(tcp->dest));
+               *iptype = 4;
+               memcpy(peer_ip, &ip->saddr, 4);
+               memcpy(local_ip, &ip->daddr, 4);
+       } else {
+               pr_debug("%s saddr %pI6 daddr %pI6 sport %u dport %u\n",
+                        __func__, ip6->saddr.s6_addr, ip6->daddr.s6_addr,
+                        ntohs(tcp->source), ntohs(tcp->dest));
+               *iptype = 6;
+               memcpy(peer_ip, ip6->saddr.s6_addr, 16);
+               memcpy(local_ip, ip6->daddr.s6_addr, 16);
+       }
+       *peer_port = tcp->source;
+       *local_port = tcp->dest;
+}
+EXPORT_SYMBOL(cxgb_get_4tuple);
+
+static bool
+cxgb_our_interface(struct cxgb4_lld_info *lldi,
+                  struct net_device *(*get_real_dev)(struct net_device *),
+                  struct net_device *egress_dev)
+{
+       int i;
+
+       egress_dev = get_real_dev(egress_dev);
+       for (i = 0; i < lldi->nports; i++)
+               if (lldi->ports[i] == egress_dev)
+                       return true;
+       return false;
+}
+
+struct dst_entry *
+cxgb_find_route(struct cxgb4_lld_info *lldi,
+               struct net_device *(*get_real_dev)(struct net_device *),
+               __be32 local_ip, __be32 peer_ip, __be16 local_port,
+               __be16 peer_port, u8 tos)
+{
+       struct rtable *rt;
+       struct flowi4 fl4;
+       struct neighbour *n;
+
+       rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
+                                  peer_port, local_port, IPPROTO_TCP,
+                                  tos, 0);
+       if (IS_ERR(rt))
+               return NULL;
+       n = dst_neigh_lookup(&rt->dst, &peer_ip);
+       if (!n)
+               return NULL;
+       if (!cxgb_our_interface(lldi, get_real_dev, n->dev) &&
+           !(n->dev->flags & IFF_LOOPBACK)) {
+               neigh_release(n);
+               dst_release(&rt->dst);
+               return NULL;
+       }
+       neigh_release(n);
+       return &rt->dst;
+}
+EXPORT_SYMBOL(cxgb_find_route);
+
+struct dst_entry *
+cxgb_find_route6(struct cxgb4_lld_info *lldi,
+                struct net_device *(*get_real_dev)(struct net_device *),
+                __u8 *local_ip, __u8 *peer_ip, __be16 local_port,
+                __be16 peer_port, u8 tos, __u32 sin6_scope_id)
+{
+       struct dst_entry *dst = NULL;
+
+       if (IS_ENABLED(CONFIG_IPV6)) {
+               struct flowi6 fl6;
+
+               memset(&fl6, 0, sizeof(fl6));
+               memcpy(&fl6.daddr, peer_ip, 16);
+               memcpy(&fl6.saddr, local_ip, 16);
+               if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+                       fl6.flowi6_oif = sin6_scope_id;
+               dst = ip6_route_output(&init_net, NULL, &fl6);
+               if (!dst)
+                       goto out;
+               if (!cxgb_our_interface(lldi, get_real_dev,
+                                       ip6_dst_idev(dst)->dev) &&
+                   !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+                       dst_release(dst);
+                       dst = NULL;
+               }
+       }
+
+out:
+       return dst;
+}
+EXPORT_SYMBOL(cxgb_find_route6);
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
new file mode 100644 (file)
index 0000000..515b94f
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials
+ *       provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __LIBCXGB_CM_H__
+#define __LIBCXGB_CM_H__
+
+
+#include <net/tcp.h>
+
+#include <cxgb4.h>
+#include <t4_msg.h>
+#include <l2t.h>
+
+void
+cxgb_get_4tuple(struct cpl_pass_accept_req *, enum chip_type,
+               int *, __u8 *, __u8 *, __be16 *, __be16 *);
+struct dst_entry *
+cxgb_find_route(struct cxgb4_lld_info *,
+               struct net_device *(*)(struct net_device *),
+               __be32, __be32, __be16, __be16, u8);
+struct dst_entry *
+cxgb_find_route6(struct cxgb4_lld_info *,
+                struct net_device *(*)(struct net_device *),
+                __u8 *, __u8 *, __be16, __be16, u8, __u32);
+
+/* Returns whether a CPL status conveys negative advice.
+ */
+static inline bool cxgb_is_neg_adv(unsigned int status)
+{
+       return status == CPL_ERR_RTX_NEG_ADVICE ||
+              status == CPL_ERR_PERSIST_NEG_ADVICE ||
+              status == CPL_ERR_KEEPALV_NEG_ADVICE;
+}
+
+static inline void
+cxgb_best_mtu(const unsigned short *mtus, unsigned short mtu,
+             unsigned int *idx, int use_ts, int ipv6)
+{
+       unsigned short hdr_size = (ipv6 ?
+                                  sizeof(struct ipv6hdr) :
+                                  sizeof(struct iphdr)) +
+                                 sizeof(struct tcphdr) +
+                                 (use_ts ?
+                                  round_up(TCPOLEN_TIMESTAMP, 4) : 0);
+       unsigned short data_size = mtu - hdr_size;
+
+       cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
+}
+
+static inline u32 cxgb_compute_wscale(u32 win)
+{
+       u32 wscale = 0;
+
+       while (wscale < 14 && (65535 << wscale) < win)
+               wscale++;
+       return wscale;
+}
+
+static inline void
+cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
+{
+       struct cpl_tid_release *req;
+
+       req = (struct cpl_tid_release *)__skb_put(skb, len);
+       memset(req, 0, len);
+
+       INIT_TP_WR(req, tid);
+       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+       set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
+}
+
+static inline void
+cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+                     void *handle, arp_err_handler_t handler)
+{
+       struct cpl_close_con_req *req;
+
+       req = (struct cpl_close_con_req *)__skb_put(skb, len);
+       memset(req, 0, len);
+
+       INIT_TP_WR(req, tid);
+       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
+       set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
+       t4_set_arp_err_handler(skb, handle, handler);
+}
+
+static inline void
+cxgb_mk_abort_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+                 void *handle, arp_err_handler_t handler)
+{
+       struct cpl_abort_req *req;
+
+       req = (struct cpl_abort_req *)__skb_put(skb, len);
+       memset(req, 0, len);
+
+       INIT_TP_WR(req, tid);
+       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
+       req->cmd = CPL_ABORT_SEND_RST;
+       set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
+       t4_set_arp_err_handler(skb, handle, handler);
+}
+
+static inline void
+cxgb_mk_abort_rpl(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
+{
+       struct cpl_abort_rpl *rpl;
+
+       rpl = (struct cpl_abort_rpl *)__skb_put(skb, len);
+       memset(rpl, 0, len);
+
+       INIT_TP_WR(rpl, tid);
+       OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+       rpl->cmd = CPL_ABORT_NO_RST;
+       set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
+}
+
+static inline void
+cxgb_mk_rx_data_ack(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+                   u32 credit_dack)
+{
+       struct cpl_rx_data_ack *req;
+
+       req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
+       memset(req, 0, len);
+
+       INIT_TP_WR(req, tid);
+       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, tid));
+       req->credit_dack = cpu_to_be32(credit_dack);
+       set_wr_txq(skb, CPL_PRIORITY_ACK, chan);
+}
+#endif
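cxgb_compute_wscale() in the new header picks the TCP window-scale shift: the smallest value, capped at 14, that lets a 16-bit window field cover the requested window. The same loop as a standalone function with a few spot checks:

#include <assert.h>
#include <stdint.h>

static uint32_t compute_wscale(uint32_t win)
{
        uint32_t wscale = 0;

        while (wscale < 14 && (65535u << wscale) < win)
                wscale++;
        return wscale;
}

int main(void)
{
        assert(compute_wscale(65535) == 0);       /* fits without scaling     */
        assert(compute_wscale(65536) == 1);       /* one doubling is enough   */
        assert(compute_wscale(UINT32_MAX) == 14); /* capped at the RFC maximum */
        return 0;
}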
index 58c6338..79d8009 100644 (file)
@@ -867,7 +867,7 @@ static int netdev_open(struct net_device *dev)
 
        /* Initialize other registers. */
        __set_mac_addr(dev);
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
        iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
 #else
        iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
index 8ddeedb..ddf0260 100644 (file)
@@ -192,7 +192,7 @@ struct fman_mac_params {
        /* A handle to the FM object this port related to */
        void *fm;
        /* MDIO exceptions interrupt source - not valid for all
-        * MACs; MUST be set to 'NO_IRQ' for MACs that don't have
+        * MACs; MUST be set to 0 for MACs that don't have
         * mdio-irq, or for polling
         */
        void *dev_id; /* device cookie used by the exception cbs */
index 7919896..120c758 100644 (file)
@@ -90,7 +90,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
        int ret = -EINVAL;
 
        fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-       if (fep->interrupt == NO_IRQ)
+       if (!fep->interrupt)
                goto out;
 
        fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
index 21fbaaf..777beff 100644 (file)
@@ -99,7 +99,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
        struct platform_device *ofdev = to_platform_device(fep->dev);
 
        fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-       if (fep->interrupt == NO_IRQ)
+       if (!fep->interrupt)
                return -EINVAL;
 
        fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
index 9d52e1e..15abd37 100644 (file)
@@ -99,7 +99,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
        struct platform_device *ofdev = to_platform_device(fep->dev);
 
        fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-       if (fep->interrupt == NO_IRQ)
+       if (!fep->interrupt)
                return -EINVAL;
 
        fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
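The NO_IRQ removals in these hunks (and the similar ones further down) follow the convention that irq_of_parse_and_map() and irq_create_mapping() return 0 when no mapping exists, so "no interrupt" is simply a falsy value and the NO_IRQ constant is unnecessary. A hypothetical stand-in showing the idiom the patches switch to:

#include <stdio.h>

/* Hypothetical mapper: returns a virq number, or 0 when nothing is wired up. */
static unsigned int demo_parse_and_map_irq(int wired)
{
        return wired ? 42 : 0;
}

int main(void)
{
        unsigned int irq = demo_parse_and_map_irq(0);

        if (!irq) {                     /* replaces 'irq == NO_IRQ' */
                fprintf(stderr, "failed to map interrupt\n");
                return 1;
        }
        printf("mapped irq %u\n", irq);
        return 0;
}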
index f3c63dc..446c7b3 100644 (file)
@@ -195,7 +195,7 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
        return 0;
 }
 
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+#if IS_ENABLED(CONFIG_GIANFAR)
 /*
  * Return the TBIPA address, starting from the address
  * of the mapped GFAR MDIO registers (struct gfar)
@@ -228,7 +228,7 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
 }
 #endif
 
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+#if IS_ENABLED(CONFIG_UCC_GETH)
 /*
  * Return the TBIPAR address for a QE MDIO node, starting from the address
  * of the mapped MII registers (struct fsl_pq_mii)
@@ -306,7 +306,7 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end)
 #endif
 
 static const struct of_device_id fsl_pq_mdio_match[] = {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+#if IS_ENABLED(CONFIG_GIANFAR)
        {
                .compatible = "fsl,gianfar-tbi",
                .data = &(struct fsl_pq_mdio_data) {
@@ -344,7 +344,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
                },
        },
 #endif
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+#if IS_ENABLED(CONFIG_UCC_GETH)
        {
                .compatible = "fsl,ucc-mdio",
                .data = &(struct fsl_pq_mdio_data) {
index a90ab40..415ffa1 100644 (file)
@@ -761,7 +761,7 @@ static const struct ethtool_ops hip04_ethtool_ops = {
        .get_drvinfo            = hip04_get_drvinfo,
 };
 
-static struct net_device_ops hip04_netdev_ops = {
+static const struct net_device_ops hip04_netdev_ops = {
        .ndo_open               = hip04_mac_open,
        .ndo_stop               = hip04_mac_stop,
        .ndo_get_stats          = hip04_get_stats,
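Constifying hip04_netdev_ops works because the structure only carries function pointers that are filled in at build time and never written afterwards, so it can live in read-only data. A minimal sketch of the pattern with a hypothetical ops structure:

#include <stdio.h>

struct demo_ops {
        int (*open)(void);
        int (*stop)(void);
};

static int demo_open(void) { puts("open"); return 0; }
static int demo_stop(void) { puts("stop"); return 0; }

/* const: the table can be placed in read-only memory and never patched. */
static const struct demo_ops demo_netdev_ops = {
        .open = demo_open,
        .stop = demo_stop,
};

int main(void)
{
        return demo_netdev_ops.open() | demo_netdev_ops.stop();
}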
index befb4ac..ce235b7 100644 (file)
@@ -89,10 +89,10 @@ static char version[] __initdata =
 #define DEB(x,y)       if (i596_debug & (x)) y
 
 
-#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE)
+#if IS_ENABLED(CONFIG_MVME16x_NET)
 #define ENABLE_MVME16x_NET
 #endif
-#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
+#if IS_ENABLED(CONFIG_BVME6000_NET)
 #define ENABLE_BVME6000_NET
 #endif
 
index 4c9771d..ec4d0f3 100644 (file)
@@ -2750,7 +2750,7 @@ static int emac_probe(struct platform_device *ofdev)
        /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
        dev->emac_irq = irq_of_parse_and_map(np, 0);
        dev->wol_irq = irq_of_parse_and_map(np, 1);
-       if (dev->emac_irq == NO_IRQ) {
+       if (!dev->emac_irq) {
                printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
                goto err_free;
        }
@@ -2913,9 +2913,9 @@ static int emac_probe(struct platform_device *ofdev)
  err_reg_unmap:
        iounmap(dev->emacp);
  err_irq_unmap:
-       if (dev->wol_irq != NO_IRQ)
+       if (dev->wol_irq)
                irq_dispose_mapping(dev->wol_irq);
-       if (dev->emac_irq != NO_IRQ)
+       if (dev->emac_irq)
                irq_dispose_mapping(dev->emac_irq);
  err_free:
        free_netdev(ndev);
@@ -2957,9 +2957,9 @@ static int emac_remove(struct platform_device *ofdev)
        emac_dbg_unregister(dev);
        iounmap(dev->emacp);
 
-       if (dev->wol_irq != NO_IRQ)
+       if (dev->wol_irq)
                irq_dispose_mapping(dev->wol_irq);
-       if (dev->emac_irq != NO_IRQ)
+       if (dev->emac_irq)
                irq_dispose_mapping(dev->emac_irq);
 
        free_netdev(dev->ndev);
index fdb5cdb..aaf6fec 100644 (file)
@@ -597,9 +597,8 @@ static int mal_probe(struct platform_device *ofdev)
                mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
        }
 
-       if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
-           mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ ||
-           mal->rxde_irq == NO_IRQ) {
+       if (!mal->txeob_irq || !mal->rxeob_irq || !mal->serr_irq ||
+           !mal->txde_irq  || !mal->rxde_irq) {
                printk(KERN_ERR
                       "mal%d: failed to map interrupts !\n", index);
                err = -ENODEV;
index 62454d7..bfe17d9 100644 (file)
@@ -1424,7 +1424,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
                scrq = adapter->tx_scrq[i];
                scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
 
-               if (scrq->irq == NO_IRQ) {
+               if (!scrq->irq) {
                        rc = -EINVAL;
                        dev_err(dev, "Error mapping irq\n");
                        goto req_tx_irq_failed;
@@ -1444,7 +1444,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
        for (i = 0; i < adapter->req_rx_queues; i++) {
                scrq = adapter->rx_scrq[i];
                scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
-               if (scrq->irq == NO_IRQ) {
+               if (!scrq->irq) {
                        rc = -EINVAL;
                        dev_err(dev, "Error mapping irq\n");
                        goto req_rx_irq_failed;
index fcdea29..61b0fc4 100644 (file)
@@ -5112,9 +5112,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
                                       DCB_CAP_DCBX_VER_IEEE;
 
                        pf->flags |= I40E_FLAG_DCB_CAPABLE;
-                       /* Enable DCB tagging only when more than one TC */
+                       /* Enable DCB tagging only when more than one TC
+                        * or explicitly disable if only one TC
+                        */
                        if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
                                pf->flags |= I40E_FLAG_DCB_ENABLED;
+                       else
+                               pf->flags &= ~I40E_FLAG_DCB_ENABLED;
                        dev_dbg(&pf->pdev->dev,
                                "DCBX offload is supported for this PF.\n");
                }
@@ -5715,7 +5719,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
        u8 type;
 
        /* Not DCB capable or capability disabled */
-       if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+       if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
                return ret;
 
        /* Ignore if event is not for Nearest Bridge */
@@ -7895,6 +7899,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
 #endif
                                       I40E_FLAG_RSS_ENABLED    |
                                       I40E_FLAG_DCB_CAPABLE    |
+                                      I40E_FLAG_DCB_ENABLED    |
                                       I40E_FLAG_SRIOV_ENABLED  |
                                       I40E_FLAG_FD_SB_ENABLED  |
                                       I40E_FLAG_FD_ATR_ENABLED |
@@ -10531,6 +10536,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                               I40E_FLAG_FD_SB_ENABLED  |
                               I40E_FLAG_FD_ATR_ENABLED |
                               I40E_FLAG_DCB_CAPABLE    |
+                              I40E_FLAG_DCB_ENABLED    |
                               I40E_FLAG_SRIOV_ENABLED  |
                               I40E_FLAG_VMDQ_ENABLED);
        } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
@@ -10554,7 +10560,8 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                /* Not enough queues for all TCs */
                if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
                    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
-                       pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+                       pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
+                                       I40E_FLAG_DCB_ENABLED);
                        dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
                }
                pf->num_lan_qps = max_t(int, pf->rss_size_max,
@@ -10951,7 +10958,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        err = i40e_init_pf_dcb(pf);
        if (err) {
                dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
-               pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+               pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
                /* Continue without DCB enabled */
        }
 #endif /* CONFIG_I40E_DCB */
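Clearing two distinct flag bits requires complementing their OR; complementing the AND of two different single bits yields all-ones and clears nothing. A standalone check with made-up flag values (the real I40E flag constants differ):

#include <assert.h>

#define DEMO_FLAG_DCB_CAPABLE (1u << 0)
#define DEMO_FLAG_DCB_ENABLED (1u << 1)

int main(void)
{
        unsigned int flags = DEMO_FLAG_DCB_CAPABLE | DEMO_FLAG_DCB_ENABLED;

        unsigned int and_form = flags & ~(DEMO_FLAG_DCB_CAPABLE & DEMO_FLAG_DCB_ENABLED);
        unsigned int or_form  = flags & ~(DEMO_FLAG_DCB_CAPABLE | DEMO_FLAG_DCB_ENABLED);

        assert(and_form == flags);      /* AND of distinct bits is 0: a no-op */
        assert(or_form == 0);           /* OR form clears both flags          */
        return 0;
}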
index 33c0250..b06e32d 100644 (file)
 #include "ixgbe_type.h"
 #include "ixgbe_common.h"
 #include "ixgbe_dcb.h"
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
 #define IXGBE_FCOE
 #include "ixgbe_fcoe.h"
-#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
+#endif /* IS_ENABLED(CONFIG_FCOE) */
 #ifdef CONFIG_IXGBE_DCA
 #include <linux/dca.h>
 #endif
index e74fd44..a32de43 100644 (file)
@@ -133,7 +133,7 @@ struct mvneta_bm_pool {
 void *mvneta_frag_alloc(unsigned int frag_size);
 void mvneta_frag_free(unsigned int frag_size, void *data);
 
-#if defined(CONFIG_MVNETA_BM) || defined(CONFIG_MVNETA_BM_MODULE)
+#if IS_ENABLED(CONFIG_MVNETA_BM)
 void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
                            struct mvneta_bm_pool *bm_pool, u8 port_map);
 void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
index 473977d..522fe8d 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <linux/clk.h>
+#include <linux/pm_runtime.h>
 #include <linux/if_vlan.h>
 #include <linux/reset.h>
 #include <linux/tcp.h>
@@ -50,6 +51,10 @@ static const struct mtk_ethtool_stats {
        MTK_ETHTOOL_STAT(rx_flow_control_packets),
 };
 
+static const char * const mtk_clks_source_name[] = {
+       "ethif", "esw", "gp1", "gp2"
+};
+
 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
 {
        __raw_writel(val, eth->base + reg);
@@ -140,6 +145,9 @@ static void mtk_phy_link_adjust(struct net_device *dev)
                  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
                  MAC_MCR_BACKPR_EN;
 
+       if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+               return;
+
        switch (mac->phy_dev->speed) {
        case SPEED_1000:
                mcr |= MAC_MCR_SPEED_1000;
@@ -226,7 +234,7 @@ static int mtk_phy_connect(struct mtk_mac *mac)
 {
        struct mtk_eth *eth = mac->hw;
        struct device_node *np;
-       u32 val, ge_mode;
+       u32 val;
 
        np = of_parse_phandle(mac->of_node, "phy-handle", 0);
        if (!np && of_phy_is_fixed_link(mac->of_node))
@@ -240,18 +248,18 @@ static int mtk_phy_connect(struct mtk_mac *mac)
        case PHY_INTERFACE_MODE_RGMII_RXID:
        case PHY_INTERFACE_MODE_RGMII_ID:
        case PHY_INTERFACE_MODE_RGMII:
-               ge_mode = 0;
+               mac->ge_mode = 0;
                break;
        case PHY_INTERFACE_MODE_MII:
-               ge_mode = 1;
+               mac->ge_mode = 1;
                break;
        case PHY_INTERFACE_MODE_REVMII:
-               ge_mode = 2;
+               mac->ge_mode = 2;
                break;
        case PHY_INTERFACE_MODE_RMII:
                if (!mac->id)
                        goto err_phy;
-               ge_mode = 3;
+               mac->ge_mode = 3;
                break;
        default:
                goto err_phy;
@@ -260,7 +268,7 @@ static int mtk_phy_connect(struct mtk_mac *mac)
        /* put the gmac into the right mode */
        regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
        val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
-       val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
+       val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
        regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
 
        mtk_phy_connect_node(eth, mac, np);
@@ -291,7 +299,7 @@ err_phy:
 static int mtk_mdio_init(struct mtk_eth *eth)
 {
        struct device_node *mii_np;
-       int err;
+       int ret;
 
        mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
        if (!mii_np) {
@@ -300,13 +308,13 @@ static int mtk_mdio_init(struct mtk_eth *eth)
        }
 
        if (!of_device_is_available(mii_np)) {
-               err = 0;
+               ret = -ENODEV;
                goto err_put_node;
        }
 
-       eth->mii_bus = mdiobus_alloc();
+       eth->mii_bus = devm_mdiobus_alloc(eth->dev);
        if (!eth->mii_bus) {
-               err = -ENOMEM;
+               ret = -ENOMEM;
                goto err_put_node;
        }
 
@@ -317,19 +325,11 @@ static int mtk_mdio_init(struct mtk_eth *eth)
        eth->mii_bus->parent = eth->dev;
 
        snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
-       err = of_mdiobus_register(eth->mii_bus, mii_np);
-       if (err)
-               goto err_free_bus;
-
-       return 0;
-
-err_free_bus:
-       mdiobus_free(eth->mii_bus);
+       ret = of_mdiobus_register(eth->mii_bus, mii_np);
 
 err_put_node:
        of_node_put(mii_np);
-       eth->mii_bus = NULL;
-       return err;
+       return ret;
 }
 
 static void mtk_mdio_cleanup(struct mtk_eth *eth)
@@ -338,8 +338,6 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
                return;
 
        mdiobus_unregister(eth->mii_bus);
-       of_node_put(eth->mii_bus->dev.of_node);
-       mdiobus_free(eth->mii_bus);
 }
 
 static inline void mtk_irq_disable(struct mtk_eth *eth,
@@ -375,6 +373,9 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
        if (ret)
                return ret;
 
+       if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+               return -EBUSY;
+
        spin_lock_bh(&mac->hw->page_lock);
        mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
                MTK_GDMA_MAC_ADRH(mac->id));
@@ -589,14 +590,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        dma_addr_t mapped_addr;
        unsigned int nr_frags;
        int i, n_desc = 1;
-       u32 txd4 = 0;
+       u32 txd4 = 0, fport;
 
        itxd = ring->next_free;
        if (itxd == ring->last_free)
                return -ENOMEM;
 
        /* set the forward port */
-       txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+       fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+       txd4 |= fport;
 
        tx_buf = mtk_desc_to_tx_buf(ring, itxd);
        memset(tx_buf, 0, sizeof(*tx_buf));
@@ -654,7 +656,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
                        WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
                                               TX_DMA_PLEN0(frag_map_size) |
                                               last_frag * TX_DMA_LS0));
-                       WRITE_ONCE(txd->txd4, 0);
+                       WRITE_ONCE(txd->txd4, fport);
 
                        tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
                        tx_buf = mtk_desc_to_tx_buf(ring, txd);
@@ -774,6 +776,9 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
         */
        spin_lock(&eth->page_lock);
 
+       if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+               goto drop;
+
        tx_num = mtk_cal_txd_req(skb);
        if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
                mtk_stop_queue(eth);
@@ -846,6 +851,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 
                netdev = eth->netdev[mac];
 
+               if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+                       goto release_desc;
+
                /* alloc new buffer */
                new_data = napi_alloc_frag(ring->frag_size);
                if (unlikely(!new_data)) {
@@ -865,7 +873,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                /* receive data */
                skb = build_skb(data, ring->frag_size);
                if (unlikely(!skb)) {
-                       put_page(virt_to_head_page(new_data));
+                       skb_free_frag(new_data);
                        netdev->stats.rx_dropped++;
                        goto release_desc;
                }
@@ -1418,15 +1426,44 @@ static int mtk_stop(struct net_device *dev)
        return 0;
 }
 
-static int __init mtk_hw_init(struct mtk_eth *eth)
+static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
 {
-       int err, i;
+       regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+                          reset_bits,
+                          reset_bits);
+
+       usleep_range(1000, 1100);
+       regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+                          reset_bits,
+                          ~reset_bits);
+       mdelay(10);
+}
+
+static int mtk_hw_init(struct mtk_eth *eth)
+{
+       int i, val;
+
+       if (test_and_set_bit(MTK_HW_INIT, &eth->state))
+               return 0;
 
-       /* reset the frame engine */
-       reset_control_assert(eth->rstc);
-       usleep_range(10, 20);
-       reset_control_deassert(eth->rstc);
-       usleep_range(10, 20);
+       pm_runtime_enable(eth->dev);
+       pm_runtime_get_sync(eth->dev);
+
+       clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
+       clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
+       clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
+       clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
+       ethsys_reset(eth, RSTCTRL_FE);
+       ethsys_reset(eth, RSTCTRL_PPE);
+
+       regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+       for (i = 0; i < MTK_MAC_COUNT; i++) {
+               if (!eth->mac[i])
+                       continue;
+               val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
+               val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
+       }
+       regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
 
        /* Set GE2 driving and slew rate */
        regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
@@ -1446,19 +1483,6 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
        /* Enable RX VLan Offloading */
        mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
 
-       err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
-                              dev_name(eth->dev), eth);
-       if (err)
-               return err;
-       err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
-                              dev_name(eth->dev), eth);
-       if (err)
-               return err;
-
-       err = mtk_mdio_init(eth);
-       if (err)
-               return err;
-
        /* disable delay and normal interrupt */
        mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
        mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
@@ -1490,6 +1514,22 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
        return 0;
 }
 
+static int mtk_hw_deinit(struct mtk_eth *eth)
+{
+       if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
+               return 0;
+
+       clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
+       clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
+       clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
+       clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
+
+       pm_runtime_put_sync(eth->dev);
+       pm_runtime_disable(eth->dev);
+
+       return 0;
+}
+
 static int __init mtk_init(struct net_device *dev)
 {
        struct mtk_mac *mac = netdev_priv(dev);
@@ -1548,6 +1588,12 @@ static void mtk_pending_work(struct work_struct *work)
 
        rtnl_lock();
 
+       dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
+
+       while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
+               cpu_relax();
+
+       dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
        /* stop all devices to make sure that dma is properly shut down */
        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->netdev[i])
@@ -1555,6 +1601,27 @@ static void mtk_pending_work(struct work_struct *work)
                mtk_stop(eth->netdev[i]);
                __set_bit(i, &restart);
        }
+       dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
+
+       /* restart underlying hardware such as power, clock, pin mux
+        * and the connected phy
+        */
+       mtk_hw_deinit(eth);
+
+       if (eth->dev->pins)
+               pinctrl_select_state(eth->dev->pins->p,
+                                    eth->dev->pins->default_state);
+       mtk_hw_init(eth);
+
+       for (i = 0; i < MTK_MAC_COUNT; i++) {
+               if (!eth->mac[i] ||
+                   of_phy_is_fixed_link(eth->mac[i]->of_node))
+                       continue;
+               err = phy_init_hw(eth->mac[i]->phy_dev);
+               if (err)
+                       dev_err(eth->dev, "%s: PHY init failed.\n",
+                               eth->netdev[i]->name);
+       }
 
        /* restart DMA and enable IRQs */
        for (i = 0; i < MTK_MAC_COUNT; i++) {
@@ -1567,20 +1634,44 @@ static void mtk_pending_work(struct work_struct *work)
                        dev_close(eth->netdev[i]);
                }
        }
+
+       dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
+
+       clear_bit_unlock(MTK_RESETTING, &eth->state);
+
        rtnl_unlock();
 }
 
-static int mtk_cleanup(struct mtk_eth *eth)
+static int mtk_free_dev(struct mtk_eth *eth)
 {
        int i;
 
        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->netdev[i])
                        continue;
+               free_netdev(eth->netdev[i]);
+       }
+
+       return 0;
+}
 
+static int mtk_unreg_dev(struct mtk_eth *eth)
+{
+       int i;
+
+       for (i = 0; i < MTK_MAC_COUNT; i++) {
+               if (!eth->netdev[i])
+                       continue;
                unregister_netdev(eth->netdev[i]);
-               free_netdev(eth->netdev[i]);
        }
+
+       return 0;
+}
+
+static int mtk_cleanup(struct mtk_eth *eth)
+{
+       mtk_unreg_dev(eth);
+       mtk_free_dev(eth);
        cancel_work_sync(&eth->pending_work);
 
        return 0;
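The MTK_RESETTING bit threaded through the mtk driver is a coarse "hardware is being re-initialised" latch: the reset worker takes it with test_and_set_bit_lock(), the hot paths (xmit, rx poll, ethtool) just test it and bail out, and clear_bit_unlock() releases it once the engine is back. A userspace sketch of the same pattern with C11 atomics (not the kernel bitops):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool resetting;

static bool try_begin_reset(void)       /* ~ test_and_set_bit_lock() */
{
        bool expected = false;

        return atomic_compare_exchange_strong(&resetting, &expected, true);
}

static void end_reset(void)             /* ~ clear_bit_unlock() */
{
        atomic_store_explicit(&resetting, false, memory_order_release);
}

static int start_xmit(void)             /* ~ the test_bit() bail-outs */
{
        if (atomic_load(&resetting))
                return -1;              /* drop instead of touching dead HW */
        return 0;                       /* normal transmit path */
}

int main(void)
{
        if (try_begin_reset()) {
                printf("xmit during reset: %d\n", start_xmit());        /* -1 */
                end_reset();
        }
        printf("xmit after reset:  %d\n", start_xmit());                /*  0 */
        return 0;
}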
@@ -1592,6 +1683,9 @@ static int mtk_get_settings(struct net_device *dev,
        struct mtk_mac *mac = netdev_priv(dev);
        int err;
 
+       if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+               return -EBUSY;
+
        err = phy_read_status(mac->phy_dev);
        if (err)
                return -ENODEV;
@@ -1642,6 +1736,9 @@ static int mtk_nway_reset(struct net_device *dev)
 {
        struct mtk_mac *mac = netdev_priv(dev);
 
+       if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+               return -EBUSY;
+
        return genphy_restart_aneg(mac->phy_dev);
 }
 
@@ -1650,6 +1747,9 @@ static u32 mtk_get_link(struct net_device *dev)
        struct mtk_mac *mac = netdev_priv(dev);
        int err;
 
+       if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+               return -EBUSY;
+
        err = genphy_update_link(mac->phy_dev);
        if (err)
                return ethtool_op_get_link(dev);
@@ -1690,6 +1790,9 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
        unsigned int start;
        int i;
 
+       if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+               return;
+
        if (netif_running(dev) && netif_device_present(dev)) {
                if (spin_trylock(&hwstats->stats_lock)) {
                        mtk_stats_update_mac(mac);
@@ -1791,16 +1894,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
        eth->netdev[id]->features |= MTK_HW_FEATURES;
        eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
 
-       err = register_netdev(eth->netdev[id]);
-       if (err) {
-               dev_err(eth->dev, "error bringing up device\n");
-               goto free_netdev;
-       }
        eth->netdev[id]->irq = eth->irq[0];
-       netif_info(eth, probe, eth->netdev[id],
-                  "mediatek frame engine at 0x%08lx, irq %d\n",
-                  eth->netdev[id]->base_addr, eth->irq[0]);
-
        return 0;
 
 free_netdev:
@@ -1825,6 +1919,7 @@ static int mtk_probe(struct platform_device *pdev)
        if (!eth)
                return -ENOMEM;
 
+       eth->dev = &pdev->dev;
        eth->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(eth->base))
                return PTR_ERR(eth->base);
@@ -1846,12 +1941,6 @@ static int mtk_probe(struct platform_device *pdev)
                return PTR_ERR(eth->pctl);
        }
 
-       eth->rstc = devm_reset_control_get(&pdev->dev, "eth");
-       if (IS_ERR(eth->rstc)) {
-               dev_err(&pdev->dev, "no eth reset found\n");
-               return PTR_ERR(eth->rstc);
-       }
-
        for (i = 0; i < 3; i++) {
                eth->irq[i] = platform_get_irq(pdev, i);
                if (eth->irq[i] < 0) {
@@ -1859,21 +1948,16 @@ static int mtk_probe(struct platform_device *pdev)
                        return -ENXIO;
                }
        }
+       for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
+               eth->clks[i] = devm_clk_get(eth->dev,
+                                           mtk_clks_source_name[i]);
+               if (IS_ERR(eth->clks[i])) {
+                       if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
+                               return -EPROBE_DEFER;
+                       return -ENODEV;
+               }
+       }
 
-       eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
-       eth->clk_esw = devm_clk_get(&pdev->dev, "esw");
-       eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1");
-       eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2");
-       if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
-           IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
-               return -ENODEV;
-
-       clk_prepare_enable(eth->clk_ethif);
-       clk_prepare_enable(eth->clk_esw);
-       clk_prepare_enable(eth->clk_gp1);
-       clk_prepare_enable(eth->clk_gp2);
-
-       eth->dev = &pdev->dev;
        eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
        INIT_WORK(&eth->pending_work, mtk_pending_work);
 
@@ -1891,7 +1975,35 @@ static int mtk_probe(struct platform_device *pdev)
 
                err = mtk_add_mac(eth, mac_np);
                if (err)
-                       goto err_free_dev;
+                       goto err_deinit_hw;
+       }
+
+       err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
+                              dev_name(eth->dev), eth);
+       if (err)
+               goto err_free_dev;
+
+       err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
+                              dev_name(eth->dev), eth);
+       if (err)
+               goto err_free_dev;
+
+       err = mtk_mdio_init(eth);
+       if (err)
+               goto err_free_dev;
+
+       for (i = 0; i < MTK_MAX_DEVS; i++) {
+               if (!eth->netdev[i])
+                       continue;
+
+               err = register_netdev(eth->netdev[i]);
+               if (err) {
+                       dev_err(eth->dev, "error bringing up device\n");
+                       goto err_deinit_mdio;
+               }
+               netif_info(eth, probe, eth->netdev[i],
+                          "mediatek frame engine at 0x%08lx, irq %d\n",
+                          eth->netdev[i]->base_addr, eth->irq[0]);
        }
 
        /* we run 2 devices on the same DMA ring so we need a dummy device
@@ -1907,19 +2019,29 @@ static int mtk_probe(struct platform_device *pdev)
 
        return 0;
 
+err_deinit_mdio:
+       mtk_mdio_cleanup(eth);
 err_free_dev:
-       mtk_cleanup(eth);
+       mtk_free_dev(eth);
+err_deinit_hw:
+       mtk_hw_deinit(eth);
+
        return err;
 }
 
 static int mtk_remove(struct platform_device *pdev)
 {
        struct mtk_eth *eth = platform_get_drvdata(pdev);
+       int i;
+
+       /* stop all devices to make sure that dma is properly shut down */
+       for (i = 0; i < MTK_MAC_COUNT; i++) {
+               if (!eth->netdev[i])
+                       continue;
+               mtk_stop(eth->netdev[i]);
+       }
 
-       clk_disable_unprepare(eth->clk_ethif);
-       clk_disable_unprepare(eth->clk_esw);
-       clk_disable_unprepare(eth->clk_gp1);
-       clk_disable_unprepare(eth->clk_gp2);
+       mtk_hw_deinit(eth);
 
        netif_napi_del(&eth->tx_napi);
        netif_napi_del(&eth->rx_napi);
index 7c1f3f2..79954b4 100644 (file)
 #define SYSCFG0_GE_MASK                0x3
 #define SYSCFG0_GE_MODE(x, y)  (x << (12 + (y * 2)))
 
+/* ethernet reset control register */
+#define ETHSYS_RSTCTRL         0x34
+#define RSTCTRL_FE             BIT(6)
+#define RSTCTRL_PPE            BIT(31)
+
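/* Editorial sketch, not from the patch: it only illustrates how the two reset
 * bits defined above could be driven through a syscon regmap.  The helper
 * name, the "ethsys" regmap handle and the delay are assumptions made for
 * illustration only.
 */
#include <linux/regmap.h>
#include <linux/delay.h>

static void mtk_ethsys_reset_sketch(struct regmap *ethsys)
{
	/* assert the frame-engine and PPE reset lines, then release them */
	regmap_update_bits(ethsys, ETHSYS_RSTCTRL,
			   RSTCTRL_FE | RSTCTRL_PPE,
			   RSTCTRL_FE | RSTCTRL_PPE);
	usleep_range(1000, 1100);
	regmap_update_bits(ethsys, ETHSYS_RSTCTRL,
			   RSTCTRL_FE | RSTCTRL_PPE, 0);
}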
 struct mtk_rx_dma {
        unsigned int rxd1;
        unsigned int rxd2;
@@ -319,6 +324,22 @@ enum mtk_tx_flags {
        MTK_TX_FLAGS_PAGE0      = 0x02,
 };
 
+/* This enum gives the order in which the clocks are stored in the clock
+ * array
+ */
+enum mtk_clks_map {
+       MTK_CLK_ETHIF,
+       MTK_CLK_ESW,
+       MTK_CLK_GP1,
+       MTK_CLK_GP2,
+       MTK_CLK_MAX
+};
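/* Editorial sketch, not from the patch: the enum above fixes the order of the
 * entries in the clks[] array that replaces the four individual clock pointers
 * in struct mtk_eth, so the clocks can be handled in a loop.  The helper below
 * is hypothetical and only shows that pattern.
 */
#include <linux/clk.h>

static int mtk_clk_enable_all_sketch(struct mtk_eth *eth)
{
	int i, ret;

	for (i = 0; i < MTK_CLK_MAX; i++) {
		ret = clk_prepare_enable(eth->clks[i]);
		if (ret)
			goto err_disable;
	}
	return 0;

err_disable:
	/* roll back the clocks that were already enabled */
	while (--i >= 0)
		clk_disable_unprepare(eth->clks[i]);
	return ret;
}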
+
+enum mtk_dev_state {
+       MTK_HW_INIT,
+       MTK_RESETTING
+};
+
 /* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
 *                     by the TX descriptors
  * @skb:               The SKB pointer of the packet being sent
@@ -399,18 +420,15 @@ struct mtk_rx_ring {
  * @scratch_ring:      Newer SoCs need memory for a second HW managed TX ring
  * @phy_scratch_ring:  physical address of scratch_ring
  * @scratch_head:      The scratch memory that scratch_ring points to.
- * @clk_ethif:         The ethif clock
- * @clk_esw:           The switch clock
- * @clk_gp1:           The gmac1 clock
- * @clk_gp2:           The gmac2 clock
+ * @clks:              clock array for all clocks required
  * @mii_bus:           If there is a bus we need to create an instance for it
  * @pending_work:      The workqueue used to reset the dma ring
+ * @state:             Initialization and runtime state of the device.
  */
 
 struct mtk_eth {
        struct device                   *dev;
        void __iomem                    *base;
-       struct reset_control            *rstc;
        spinlock_t                      page_lock;
        spinlock_t                      irq_lock;
        struct net_device               dummy_dev;
@@ -429,17 +447,17 @@ struct mtk_eth {
        struct mtk_tx_dma               *scratch_ring;
        dma_addr_t                      phy_scratch_ring;
        void                            *scratch_head;
-       struct clk                      *clk_ethif;
-       struct clk                      *clk_esw;
-       struct clk                      *clk_gp1;
-       struct clk                      *clk_gp2;
+       struct clk                      *clks[MTK_CLK_MAX];
+
        struct mii_bus                  *mii_bus;
        struct work_struct              pending_work;
+       unsigned long                   state;
 };
 
 /* struct mtk_mac -    the structure that holds the info about the MACs of the
  *                     SoC
  * @id:                        The number of the MAC
+ * @ge_mode:            Interface mode, saved for restoring the setup
  * @of_node:           Our devicetree node
  * @hw:                        Backpointer to our main datastruture
  * @hw_stats:          Packet statistics counter
@@ -447,6 +465,7 @@ struct mtk_eth {
  */
 struct mtk_mac {
        int                             id;
+       int                             ge_mode;
        struct device_node              *of_node;
        struct mtk_eth                  *hw;
        struct mtk_hw_stats             *hw_stats;
index 99c6bbd..b04760a 100644 (file)
@@ -94,7 +94,7 @@ static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
                *cap = true;
                break;
        case DCB_CAP_ATTR_DCBX:
-               *cap = priv->cee_params.dcbx_cap;
+               *cap = priv->dcbx_cap;
                break;
        case DCB_CAP_ATTR_PFC_TCS:
                *cap = 1 <<  mlx4_max_tc(priv->mdev->dev);
@@ -111,14 +111,14 @@ static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
 
-       return priv->cee_params.dcb_cfg.pfc_state;
+       return priv->cee_config.pfc_state;
 }
 
 static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
 
-       priv->cee_params.dcb_cfg.pfc_state = state;
+       priv->cee_config.pfc_state = state;
 }
 
 static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
@@ -126,7 +126,7 @@ static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
 
-       *setting = priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc;
+       *setting = priv->cee_config.dcb_pfc[priority];
 }
 
 static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
@@ -134,8 +134,8 @@ static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
 
-       priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc = setting;
-       priv->cee_params.dcb_cfg.pfc_state = true;
+       priv->cee_config.dcb_pfc[priority] = setting;
+       priv->cee_config.pfc_state = true;
 }
 
 static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
@@ -157,13 +157,11 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
 {
        struct mlx4_en_priv *priv = netdev_priv(netdev);
        struct mlx4_en_dev *mdev = priv->mdev;
-       struct mlx4_en_cee_config *dcb_cfg = &priv->cee_params.dcb_cfg;
-       int err = 0;
 
-       if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
-               return -EINVAL;
+       if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+               return 1;
 
-       if (dcb_cfg->pfc_state) {
+       if (priv->cee_config.pfc_state) {
                int tc;
 
                priv->prof->rx_pause = 0;
@@ -171,7 +169,7 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
                for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
                        u8 tc_mask = 1 << tc;
 
-                       switch (dcb_cfg->tc_config[tc].dcb_pfc) {
+                       switch (priv->cee_config.dcb_pfc[tc]) {
                        case pfc_disabled:
                                priv->prof->tx_ppp &= ~tc_mask;
                                priv->prof->rx_ppp &= ~tc_mask;
@@ -199,15 +197,17 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
                en_dbg(DRV, priv, "Set pfc off\n");
        }
 
-       err = mlx4_SET_PORT_general(mdev->dev, priv->port,
-                                   priv->rx_skb_size + ETH_FCS_LEN,
-                                   priv->prof->tx_pause,
-                                   priv->prof->tx_ppp,
-                                   priv->prof->rx_pause,
-                                   priv->prof->rx_ppp);
-       if (err)
+       if (mlx4_SET_PORT_general(mdev->dev, priv->port,
+                                 priv->rx_skb_size + ETH_FCS_LEN,
+                                 priv->prof->tx_pause,
+                                 priv->prof->tx_ppp,
+                                 priv->prof->rx_pause,
+                                 priv->prof->rx_ppp)) {
                en_err(priv, "Failed setting pause params\n");
-       return err;
+               return 1;
+       }
+
+       return 0;
 }
 
 static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
@@ -225,7 +225,7 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int num_tcs = 0;
 
-       if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+       if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
                return 1;
 
        if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
@@ -238,7 +238,10 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
                priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
        }
 
-       return mlx4_en_setup_tc(dev, num_tcs);
+       if (mlx4_en_setup_tc(dev, num_tcs))
+               return 1;
+
+       return 0;
 }
 
 /* On success returns a non-zero 802.1p user priority bitmap
@@ -252,7 +255,7 @@ static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
                                .selector = idtype,
                                .protocol = id,
                             };
-       if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+       if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
                return 0;
 
        return dcb_getapp(netdev, &app);
@@ -264,7 +267,7 @@ static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
        struct mlx4_en_priv *priv = netdev_priv(netdev);
        struct dcb_app app;
 
-       if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+       if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
                return -EINVAL;
 
        memset(&app, 0, sizeof(struct dcb_app));
@@ -433,7 +436,7 @@ static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
-       return priv->cee_params.dcbx_cap;
+       return priv->dcbx_cap;
 }
 
 static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@@ -442,7 +445,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
        struct ieee_ets ets = {0};
        struct ieee_pfc pfc = {0};
 
-       if (mode == priv->cee_params.dcbx_cap)
+       if (mode == priv->dcbx_cap)
                return 0;
 
        if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
@@ -451,7 +454,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
            !(mode & DCB_CAP_DCBX_HOST))
                goto err;
 
-       priv->cee_params.dcbx_cap = mode;
+       priv->dcbx_cap = mode;
 
        ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
        pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;
index 31a41ad..62516f8 100644 (file)
@@ -71,10 +71,11 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
 #ifdef CONFIG_MLX4_EN_DCB
        if (!mlx4_is_slave(priv->mdev->dev)) {
                if (up) {
-                       priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
+                       if (priv->dcbx_cap)
+                               priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
                } else {
                        priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
-                       priv->cee_params.dcb_cfg.pfc_state = false;
+                       priv->cee_config.pfc_state = false;
                }
        }
 #endif /* CONFIG_MLX4_EN_DCB */
@@ -3055,9 +3056,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        struct mlx4_en_priv *priv;
        int i;
        int err;
-#ifdef CONFIG_MLX4_EN_DCB
-       struct tc_configuration *tc;
-#endif
 
        dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
                                 MAX_TX_RINGS, MAX_RX_RINGS);
@@ -3124,16 +3122,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        priv->msg_enable = MLX4_EN_MSG_LEVEL;
 #ifdef CONFIG_MLX4_EN_DCB
        if (!mlx4_is_slave(priv->mdev->dev)) {
-               priv->cee_params.dcbx_cap = DCB_CAP_DCBX_VER_CEE |
-                                           DCB_CAP_DCBX_HOST |
-                                           DCB_CAP_DCBX_VER_IEEE;
+               priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
+                       DCB_CAP_DCBX_VER_IEEE;
                priv->flags |= MLX4_EN_DCB_ENABLED;
-               priv->cee_params.dcb_cfg.pfc_state = false;
+               priv->cee_config.pfc_state = false;
 
-               for (i = 0; i < MLX4_EN_NUM_UP; i++) {
-                       tc = &priv->cee_params.dcb_cfg.tc_config[i];
-                       tc->dcb_pfc = pfc_disabled;
-               }
+               for (i = 0; i < MLX4_EN_NUM_UP; i++)
+                       priv->cee_config.dcb_pfc[i] = pfc_disabled;
 
                if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
                        dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
index 9df87ca..e2509bb 100644 (file)
@@ -818,7 +818,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
                                  &inline_ok, &fragptr);
        if (unlikely(!real_size))
-               goto tx_drop;
+               goto tx_drop_count;
 
        /* Align descriptor to TXBB size */
        desc_size = ALIGN(real_size, TXBB_SIZE);
@@ -826,7 +826,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
                if (netif_msg_tx_err(priv))
                        en_warn(priv, "Oversized header or SG list\n");
-               goto tx_drop;
+               goto tx_drop_count;
        }
 
        bf_ok = ring->bf_enabled;
@@ -1071,9 +1071,10 @@ tx_drop_unmap:
                               PCI_DMA_TODEVICE);
        }
 
+tx_drop_count:
+       ring->tx_dropped++;
 tx_drop:
        dev_kfree_skb_any(skb);
-       ring->tx_dropped++;
        return NETDEV_TX_OK;
 }
 
@@ -1106,7 +1107,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
                goto tx_drop;
 
        if (mlx4_en_is_tx_ring_full(ring))
-               goto tx_drop;
+               goto tx_drop_count;
 
        /* fetch ring->cons far ahead before needing it to avoid stall */
        ring_cons = READ_ONCE(ring->cons);
@@ -1176,7 +1177,8 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
 
        return NETDEV_TX_OK;
 
-tx_drop:
+tx_drop_count:
        ring->tx_dropped++;
+tx_drop:
        return NETDEV_TX_BUSY;
 }
index 47867c4..a3528dd 100644 (file)
@@ -482,20 +482,10 @@ enum dcb_pfc_type {
        pfc_enabled_rx
 };
 
-struct tc_configuration {
-       enum dcb_pfc_type  dcb_pfc;
-};
-
 struct mlx4_en_cee_config {
        bool    pfc_state;
-       struct  tc_configuration tc_config[MLX4_EN_NUM_UP];
+       enum    dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP];
 };
-
-struct mlx4_en_cee_params {
-       u8 dcbx_cap;
-       struct mlx4_en_cee_config dcb_cfg;
-};
-
 #endif
 
 struct ethtool_flow_id {
@@ -624,7 +614,8 @@ struct mlx4_en_priv {
        struct ieee_ets ets;
        u16 maxrate[IEEE_8021QAZ_MAX_TCS];
        enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
-       struct mlx4_en_cee_params cee_params;
+       struct mlx4_en_cee_config cee_config;
+       u8 dcbx_cap;
 #endif
 #ifdef CONFIG_RFS_ACCEL
        spinlock_t filters_lock;
index 3d2095e..c5b2064 100644 (file)
@@ -52,7 +52,7 @@
 
 #define MLX4_FLAG_V_IGNORE_FCS_MASK            0x2
 #define MLX4_IGNORE_FCS_MASK                   0x1
-#define MLNX4_TX_MAX_NUMBER                    8
+#define MLX4_TC_MAX_NUMBER                     8
 
 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
 {
@@ -2022,7 +2022,7 @@ int mlx4_max_tc(struct mlx4_dev *dev)
        u8 num_tc = dev->caps.max_tc_eth;
 
        if (!num_tc)
-               num_tc = MLNX4_TX_MAX_NUMBER;
+               num_tc = MLX4_TC_MAX_NUMBER;
 
        return num_tc;
 }
index a9358cf..7dd4763 100644 (file)
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
 
 #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x1
-#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW            0x4
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW            0x3
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW            0x6
 
 #define MLX5_MPWRQ_LOG_STRIDE_SIZE             6  /* >= 6, HW restriction */
 #define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS        8  /* >= 6, HW restriction */
-#define MLX5_MPWRQ_LOG_WQE_SZ                  17
+#define MLX5_MPWRQ_LOG_WQE_SZ                  18
 #define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
                                    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
 #define MLX5_MPWRQ_PAGES_PER_WQE               BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
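/* Editorial note, not from the patch: a worked example of the macros above,
 * assuming 4 KiB pages (PAGE_SHIFT = 12):
 *
 *   MLX5_MPWRQ_WQE_PAGE_ORDER = 18 - 12 = 6
 *   MLX5_MPWRQ_PAGES_PER_WQE  = BIT(6)  = 64
 *
 * so every multi-packet WQE of a striding RQ now spans 64 pages (256 KiB),
 * twice what the previous MLX5_MPWRQ_LOG_WQE_SZ of 17 allowed.
 */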
@@ -287,20 +287,34 @@ struct mlx5e_rx_am { /* Adaptive Moderation */
        u8                                      tired;
 };
 
+/* A single cache unit can serve one napi call (for a non-striding rq)
+ * or one MPWQE (for a striding rq).
+ */
+#define MLX5E_CACHE_UNIT       (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
+                                MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
+#define MLX5E_CACHE_SIZE       (2 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
+struct mlx5e_page_cache {
+       u32 head;
+       u32 tail;
+       struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
+};
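/* Editorial note, not from the patch: with the 64-pages-per-WQE example above
 * and NAPI_POLL_WEIGHT = 64, MLX5E_CACHE_UNIT is 64 and MLX5E_CACHE_SIZE is
 * 2 * 64 = 128.  head and tail advance with "& (MLX5E_CACHE_SIZE - 1)", so the
 * cache is empty when head == tail, full when
 * ((tail + 1) & (MLX5E_CACHE_SIZE - 1)) == head, and therefore holds at most
 * MLX5E_CACHE_SIZE - 1 recycled pages.
 */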
+
 struct mlx5e_rq {
        /* data path */
        struct mlx5_wq_ll      wq;
        u32                    wqe_sz;
        struct sk_buff       **skb;
        struct mlx5e_mpw_info *wqe_info;
+       void                  *mtt_no_align;
        __be32                 mkey_be;
-       __be32                 umr_mkey_be;
 
        struct device         *pdev;
        struct net_device     *netdev;
        struct mlx5e_tstamp   *tstamp;
        struct mlx5e_rq_stats  stats;
        struct mlx5e_cq        cq;
+       struct mlx5e_page_cache page_cache;
+
        mlx5e_fp_handle_rx_cqe handle_rx_cqe;
        mlx5e_fp_alloc_wqe     alloc_wqe;
        mlx5e_fp_dealloc_wqe   dealloc_wqe;
@@ -323,32 +337,15 @@ struct mlx5e_rq {
 
 struct mlx5e_umr_dma_info {
        __be64                *mtt;
-       __be64                *mtt_no_align;
        dma_addr_t             mtt_addr;
-       struct mlx5e_dma_info *dma_info;
+       struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
+       struct mlx5e_umr_wqe   wqe;
 };
 
 struct mlx5e_mpw_info {
-       union {
-               struct mlx5e_dma_info     dma_info;
-               struct mlx5e_umr_dma_info umr;
-       };
+       struct mlx5e_umr_dma_info umr;
        u16 consumed_strides;
        u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
-
-       void (*dma_pre_sync)(struct device *pdev,
-                            struct mlx5e_mpw_info *wi,
-                            u32 wqe_offset, u32 len);
-       void (*add_skb_frag)(struct mlx5e_rq *rq,
-                            struct sk_buff *skb,
-                            struct mlx5e_mpw_info *wi,
-                            u32 page_idx, u32 frag_offset, u32 len);
-       void (*copy_skb_header)(struct device *pdev,
-                               struct sk_buff *skb,
-                               struct mlx5e_mpw_info *wi,
-                               u32 page_idx, u32 offset,
-                               u32 headlen);
-       void (*free_wqe)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
 };
 
 struct mlx5e_tx_wqe_info {
@@ -668,28 +665,17 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
 void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
 
+void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
+                       bool recycle);
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
 int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
-int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
 void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
-void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
-void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
-                                   struct mlx5_cqe64 *cqe,
-                                   u16 byte_cnt,
-                                   struct mlx5e_mpw_info *wi,
-                                   struct sk_buff *skb);
-void mlx5e_complete_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
-                                       struct mlx5_cqe64 *cqe,
-                                       u16 byte_cnt,
-                                       struct mlx5e_mpw_info *wi,
-                                       struct sk_buff *skb);
-void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
-                               struct mlx5e_mpw_info *wi);
-void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
-                                   struct mlx5e_mpw_info *wi);
+void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq);
+void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
 struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
 
 void mlx5e_rx_am(struct mlx5e_rq *rq);
@@ -776,6 +762,12 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
        mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
 }
 
+static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
+{
+       return rq->mpwqe_mtt_offset +
+               wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
+}
+
 static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 {
        return min_t(int, mdev->priv.eq_table.num_comp_vectors,
index d1cd156..27ff401 100644 (file)
@@ -331,7 +331,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
        if (mlx5e_query_global_pause_combined(priv)) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
-                                                         pport_per_prio_pfc_stats_desc, 0);
+                                                         pport_per_prio_pfc_stats_desc, i);
                }
        }
 
@@ -659,9 +659,10 @@ out:
 static void ptys2ethtool_supported_link(unsigned long *supported_modes,
                                        u32 eth_proto_cap)
 {
+       unsigned long proto_cap = eth_proto_cap;
        int proto;
 
-       for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
+       for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
                bitmap_or(supported_modes, supported_modes,
                          ptys2ethtool_table[proto].supported,
                          __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -670,9 +671,10 @@ static void ptys2ethtool_supported_link(unsigned long *supported_modes,
 static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
                                    u32 eth_proto_cap)
 {
+       unsigned long proto_cap = eth_proto_cap;
        int proto;
 
-       for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
+       for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
                bitmap_or(advertising_modes, advertising_modes,
                          ptys2ethtool_table[proto].advertised,
                          __ETHTOOL_LINK_MODE_MASK_NBITS);
index af4c61e..8595b50 100644 (file)
@@ -138,10 +138,13 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_wqe_err   += rq_stats->wqe_err;
                s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
-               s->rx_mpwqe_frag   += rq_stats->mpwqe_frag;
                s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
                s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
                s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
+               s->rx_cache_reuse += rq_stats->cache_reuse;
+               s->rx_cache_full  += rq_stats->cache_full;
+               s->rx_cache_empty += rq_stats->cache_empty;
+               s->rx_cache_busy  += rq_stats->cache_busy;
 
                for (j = 0; j < priv->params.num_tc; j++) {
                        sq_stats = &priv->channel[i]->sq[j].stats;
@@ -295,6 +298,107 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
 #define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
 #define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
 
+static inline int mlx5e_get_wqe_mtt_sz(void)
+{
+       /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+        * To avoid copying garbage after the mtt array, we allocate
+        * a little more.
+        */
+       return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
+                    MLX5_UMR_MTT_ALIGNMENT);
+}
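/* Editorial note, not from the patch: continuing the 64-pages-per-WQE example,
 * the per-WQE MTT array is 64 * sizeof(__be64) = 512 bytes.  Taking
 * MLX5_UMR_MTT_ALIGNMENT as 0x40 (64 bytes), ALIGN(512, 64) = 512, so no
 * padding is needed in that configuration.  mlx5e_rq_alloc_mpwqe_info() below
 * additionally over-allocates each entry by MLX5_UMR_ALIGN - 1 bytes so that
 * PTR_ALIGN() can hand out a suitably aligned MTT pointer per WQE.
 */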
+
+static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
+                                      struct mlx5e_umr_wqe *wqe, u16 ix)
+{
+       struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
+       struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+       struct mlx5_wqe_data_seg      *dseg = &wqe->data;
+       struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+       u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
+       u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
+
+       cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+                                     ds_cnt);
+       cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
+       cseg->imm       = rq->mkey_be;
+
+       ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
+       ucseg->klm_octowords =
+               cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
+       ucseg->bsf_octowords =
+               cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
+       ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+
+       dseg->lkey = sq->mkey_be;
+       dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
+}
+
+static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
+                                    struct mlx5e_channel *c)
+{
+       int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+       int mtt_sz = mlx5e_get_wqe_mtt_sz();
+       int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
+       int i;
+
+       rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
+                                   GFP_KERNEL, cpu_to_node(c->cpu));
+       if (!rq->wqe_info)
+               goto err_out;
+
+       /* We allocate more than mtt_sz as we will align the pointer */
+       rq->mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
+                                       cpu_to_node(c->cpu));
+       if (unlikely(!rq->mtt_no_align))
+               goto err_free_wqe_info;
+
+       for (i = 0; i < wq_sz; i++) {
+               struct mlx5e_mpw_info *wi = &rq->wqe_info[i];
+
+               wi->umr.mtt = PTR_ALIGN(rq->mtt_no_align + i * mtt_alloc,
+                                       MLX5_UMR_ALIGN);
+               wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
+                                                 PCI_DMA_TODEVICE);
+               if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
+                       goto err_unmap_mtts;
+
+               mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
+       }
+
+       return 0;
+
+err_unmap_mtts:
+       while (--i >= 0) {
+               struct mlx5e_mpw_info *wi = &rq->wqe_info[i];
+
+               dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
+                                PCI_DMA_TODEVICE);
+       }
+       kfree(rq->mtt_no_align);
+err_free_wqe_info:
+       kfree(rq->wqe_info);
+
+err_out:
+       return -ENOMEM;
+}
+
+static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
+{
+       int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+       int mtt_sz = mlx5e_get_wqe_mtt_sz();
+       int i;
+
+       for (i = 0; i < wq_sz; i++) {
+               struct mlx5e_mpw_info *wi = &rq->wqe_info[i];
+
+               dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
+                                PCI_DMA_TODEVICE);
+       }
+       kfree(rq->mtt_no_align);
+       kfree(rq->wqe_info);
+}
+
 static int mlx5e_create_rq(struct mlx5e_channel *c,
                           struct mlx5e_rq_param *param,
                           struct mlx5e_rq *rq)
@@ -319,14 +423,16 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
 
        wq_sz = mlx5_wq_ll_get_size(&rq->wq);
 
+       rq->wq_type = priv->params.rq_wq_type;
+       rq->pdev    = c->pdev;
+       rq->netdev  = c->netdev;
+       rq->tstamp  = &priv->tstamp;
+       rq->channel = c;
+       rq->ix      = c->ix;
+       rq->priv    = c->priv;
+
        switch (priv->params.rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
-                                           GFP_KERNEL, cpu_to_node(c->cpu));
-               if (!rq->wqe_info) {
-                       err = -ENOMEM;
-                       goto err_rq_wq_destroy;
-               }
                rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
                rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
                rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
@@ -338,6 +444,10 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
                rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
                byte_count = rq->wqe_sz;
+               rq->mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
+               err = mlx5e_rq_alloc_mpwqe_info(rq, c);
+               if (err)
+                       goto err_rq_wq_destroy;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
@@ -356,26 +466,21 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
                byte_count = rq->wqe_sz;
                byte_count |= MLX5_HW_START_PADDING;
+               rq->mkey_be = c->mkey_be;
        }
 
        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
 
                wqe->data.byte_count = cpu_to_be32(byte_count);
+               wqe->data.lkey = rq->mkey_be;
        }
 
        INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
        rq->am.mode = priv->params.rx_cq_period_mode;
 
-       rq->wq_type = priv->params.rq_wq_type;
-       rq->pdev    = c->pdev;
-       rq->netdev  = c->netdev;
-       rq->tstamp  = &priv->tstamp;
-       rq->channel = c;
-       rq->ix      = c->ix;
-       rq->priv    = c->priv;
-       rq->mkey_be = c->mkey_be;
-       rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
+       rq->page_cache.head = 0;
+       rq->page_cache.tail = 0;
 
        return 0;
 
@@ -387,14 +492,22 @@ err_rq_wq_destroy:
 
 static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
 {
+       int i;
+
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               kfree(rq->wqe_info);
+               mlx5e_rq_free_mpwqe_info(rq);
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                kfree(rq->skb);
        }
 
+       for (i = rq->page_cache.head; i != rq->page_cache.tail;
+            i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
+               struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
+
+               mlx5e_page_release(rq, dma_info, false);
+       }
        mlx5_wq_destroy(&rq->wq_ctrl);
 }
 
@@ -528,7 +641,7 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
 
        /* UMR WQE (if in progress) is always at wq->head */
        if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
-               mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+               mlx5e_free_rx_mpwqe(rq, &rq->wqe_info[wq->head]);
 
        while (!mlx5_wq_ll_is_empty(wq)) {
                wqe_ix_be = *wq->tail_next;
index b6f8ebb..dc86779 100644 (file)
@@ -200,7 +200,6 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
 
        *((dma_addr_t *)skb->cb) = dma_addr;
        wqe->data.addr = cpu_to_be64(dma_addr);
-       wqe->data.lkey = rq->mkey_be;
 
        rq->skb[ix] = skb;
 
@@ -231,44 +230,11 @@ static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
        return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
 }
 
-static inline void
-mlx5e_dma_pre_sync_linear_mpwqe(struct device *pdev,
-                               struct mlx5e_mpw_info *wi,
-                               u32 wqe_offset, u32 len)
-{
-       dma_sync_single_for_cpu(pdev, wi->dma_info.addr + wqe_offset,
-                               len, DMA_FROM_DEVICE);
-}
-
-static inline void
-mlx5e_dma_pre_sync_fragmented_mpwqe(struct device *pdev,
-                                   struct mlx5e_mpw_info *wi,
-                                   u32 wqe_offset, u32 len)
-{
-       /* No dma pre sync for fragmented MPWQE */
-}
-
-static inline void
-mlx5e_add_skb_frag_linear_mpwqe(struct mlx5e_rq *rq,
-                               struct sk_buff *skb,
-                               struct mlx5e_mpw_info *wi,
-                               u32 page_idx, u32 frag_offset,
-                               u32 len)
-{
-       unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
-
-       wi->skbs_frags[page_idx]++;
-       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-                       &wi->dma_info.page[page_idx], frag_offset,
-                       len, truesize);
-}
-
-static inline void
-mlx5e_add_skb_frag_fragmented_mpwqe(struct mlx5e_rq *rq,
-                                   struct sk_buff *skb,
-                                   struct mlx5e_mpw_info *wi,
-                                   u32 page_idx, u32 frag_offset,
-                                   u32 len)
+static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
+                                           struct sk_buff *skb,
+                                           struct mlx5e_mpw_info *wi,
+                                           u32 page_idx, u32 frag_offset,
+                                           u32 len)
 {
        unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
 
@@ -282,24 +248,11 @@ mlx5e_add_skb_frag_fragmented_mpwqe(struct mlx5e_rq *rq,
 }
 
 static inline void
-mlx5e_copy_skb_header_linear_mpwqe(struct device *pdev,
-                                  struct sk_buff *skb,
-                                  struct mlx5e_mpw_info *wi,
-                                  u32 page_idx, u32 offset,
-                                  u32 headlen)
-{
-       struct page *page = &wi->dma_info.page[page_idx];
-
-       skb_copy_to_linear_data(skb, page_address(page) + offset,
-                               ALIGN(headlen, sizeof(long)));
-}
-
-static inline void
-mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
-                                      struct sk_buff *skb,
-                                      struct mlx5e_mpw_info *wi,
-                                      u32 page_idx, u32 offset,
-                                      u32 headlen)
+mlx5e_copy_skb_header_mpwqe(struct device *pdev,
+                           struct sk_buff *skb,
+                           struct mlx5e_mpw_info *wi,
+                           u32 page_idx, u32 offset,
+                           u32 headlen)
 {
        u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
        struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx];
@@ -324,46 +277,9 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
        }
 }
 
-static u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
+static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
 {
-       return rq->mpwqe_mtt_offset +
-               wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
-}
-
-static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
-                               struct mlx5e_sq *sq,
-                               struct mlx5e_umr_wqe *wqe,
-                               u16 ix)
-{
-       struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
-       struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
-       struct mlx5_wqe_data_seg      *dseg = &wqe->data;
        struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
-       u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
-       u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
-
-       memset(wqe, 0, sizeof(*wqe));
-       cseg->opmod_idx_opcode =
-               cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
-                           MLX5_OPCODE_UMR);
-       cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
-                                     ds_cnt);
-       cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
-       cseg->imm       = rq->umr_mkey_be;
-
-       ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
-       ucseg->klm_octowords =
-               cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
-       ucseg->bsf_octowords =
-               cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
-       ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-
-       dseg->lkey = sq->mkey_be;
-       dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
-}
-
-static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
-{
        struct mlx5e_sq *sq = &rq->channel->icosq;
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_umr_wqe *wqe;
@@ -378,129 +294,141 @@ static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
        }
 
        wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-       mlx5e_build_umr_wqe(rq, sq, wqe, ix);
+       memcpy(wqe, &wi->umr.wqe, sizeof(*wqe));
+       wqe->ctrl.opmod_idx_opcode =
+               cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+                           MLX5_OPCODE_UMR);
+
        sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_UMR;
        sq->ico_wqe_info[pi].num_wqebbs = num_wqebbs;
        sq->pc += num_wqebbs;
        mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
 }
 
-static inline int mlx5e_get_wqe_mtt_sz(void)
+static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
+                                     struct mlx5e_dma_info *dma_info)
 {
-       /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
-        * To avoid copying garbage after the mtt array, we allocate
-        * a little more.
-        */
-       return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
-                    MLX5_UMR_MTT_ALIGNMENT);
+       struct mlx5e_page_cache *cache = &rq->page_cache;
+       u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
+
+       if (tail_next == cache->head) {
+               rq->stats.cache_full++;
+               return false;
+       }
+
+       cache->page_cache[cache->tail] = *dma_info;
+       cache->tail = tail_next;
+       return true;
 }
 
-static int mlx5e_alloc_and_map_page(struct mlx5e_rq *rq,
-                                   struct mlx5e_mpw_info *wi,
-                                   int i)
+static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
+                                     struct mlx5e_dma_info *dma_info)
+{
+       struct mlx5e_page_cache *cache = &rq->page_cache;
+
+       if (unlikely(cache->head == cache->tail)) {
+               rq->stats.cache_empty++;
+               return false;
+       }
+
+       if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
+               rq->stats.cache_busy++;
+               return false;
+       }
+
+       *dma_info = cache->page_cache[cache->head];
+       cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
+       rq->stats.cache_reuse++;
+
+       dma_sync_single_for_device(rq->pdev, dma_info->addr, PAGE_SIZE,
+                                  DMA_FROM_DEVICE);
+       return true;
+}
+
+static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
+                                         struct mlx5e_dma_info *dma_info)
 {
        struct page *page;
 
+       if (mlx5e_rx_cache_get(rq, dma_info))
+               return 0;
+
        page = dev_alloc_page();
        if (unlikely(!page))
                return -ENOMEM;
 
-       wi->umr.dma_info[i].page = page;
-       wi->umr.dma_info[i].addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
-                                               PCI_DMA_FROMDEVICE);
-       if (unlikely(dma_mapping_error(rq->pdev, wi->umr.dma_info[i].addr))) {
+       dma_info->page = page;
+       dma_info->addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
+                                     DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
                put_page(page);
                return -ENOMEM;
        }
-       wi->umr.mtt[i] = cpu_to_be64(wi->umr.dma_info[i].addr | MLX5_EN_WR);
 
        return 0;
 }
 
-static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
-                                          struct mlx5e_rx_wqe *wqe,
-                                          u16 ix)
+void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
+                       bool recycle)
+{
+       if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
+               return;
+
+       dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, DMA_FROM_DEVICE);
+       put_page(dma_info->page);
+}
+
+static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
+                                   struct mlx5e_rx_wqe *wqe,
+                                   u16 ix)
 {
        struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
-       int mtt_sz = mlx5e_get_wqe_mtt_sz();
        u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
+       int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
+       int err;
        int i;
 
-       wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
-                                  MLX5_MPWRQ_PAGES_PER_WQE,
-                                  GFP_ATOMIC);
-       if (unlikely(!wi->umr.dma_info))
-               goto err_out;
-
-       /* We allocate more than mtt_sz as we will align the pointer */
-       wi->umr.mtt_no_align = kzalloc(mtt_sz + MLX5_UMR_ALIGN - 1,
-                                      GFP_ATOMIC);
-       if (unlikely(!wi->umr.mtt_no_align))
-               goto err_free_umr;
-
-       wi->umr.mtt = PTR_ALIGN(wi->umr.mtt_no_align, MLX5_UMR_ALIGN);
-       wi->umr.mtt_addr = dma_map_single(rq->pdev, wi->umr.mtt, mtt_sz,
-                                         PCI_DMA_TODEVICE);
-       if (unlikely(dma_mapping_error(rq->pdev, wi->umr.mtt_addr)))
-               goto err_free_mtt;
-
        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
-               if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i)))
+               struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
+
+               err = mlx5e_page_alloc_mapped(rq, dma_info);
+               if (unlikely(err))
                        goto err_unmap;
-               page_ref_add(wi->umr.dma_info[i].page,
-                            mlx5e_mpwqe_strides_per_page(rq));
+               wi->umr.mtt[i] = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
+               page_ref_add(dma_info->page, pg_strides);
                wi->skbs_frags[i] = 0;
        }
 
        wi->consumed_strides = 0;
-       wi->dma_pre_sync = mlx5e_dma_pre_sync_fragmented_mpwqe;
-       wi->add_skb_frag = mlx5e_add_skb_frag_fragmented_mpwqe;
-       wi->copy_skb_header = mlx5e_copy_skb_header_fragmented_mpwqe;
-       wi->free_wqe     = mlx5e_free_rx_fragmented_mpwqe;
-       wqe->data.lkey = rq->umr_mkey_be;
        wqe->data.addr = cpu_to_be64(dma_offset);
 
        return 0;
 
 err_unmap:
        while (--i >= 0) {
-               dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
-                              PCI_DMA_FROMDEVICE);
-               page_ref_sub(wi->umr.dma_info[i].page,
-                            mlx5e_mpwqe_strides_per_page(rq));
-               put_page(wi->umr.dma_info[i].page);
-       }
-       dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
+               struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
 
-err_free_mtt:
-       kfree(wi->umr.mtt_no_align);
-
-err_free_umr:
-       kfree(wi->umr.dma_info);
+               page_ref_sub(dma_info->page, pg_strides);
+               mlx5e_page_release(rq, dma_info, true);
+       }
 
-err_out:
-       return -ENOMEM;
+       return err;
 }
 
-void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
-                                   struct mlx5e_mpw_info *wi)
+void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
 {
-       int mtt_sz = mlx5e_get_wqe_mtt_sz();
+       int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
        int i;
 
        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
-               dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
-                              PCI_DMA_FROMDEVICE);
-               page_ref_sub(wi->umr.dma_info[i].page,
-                       mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
-               put_page(wi->umr.dma_info[i].page);
+               struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
+
+               page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
+               mlx5e_page_release(rq, dma_info, true);
        }
-       dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
-       kfree(wi->umr.mtt_no_align);
-       kfree(wi->umr.dma_info);
 }
 
-void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
+void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
 {
        struct mlx5_wq_ll *wq = &rq->wq;
        struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
@@ -508,12 +436,11 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
        clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
 
        if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
-               mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+               mlx5e_free_rx_mpwqe(rq, &rq->wqe_info[wq->head]);
                return;
        }
 
        mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
-       rq->stats.mpwqe_frag++;
 
        /* ensure wqes are visible to device before updating doorbell record */
        dma_wmb();
@@ -521,84 +448,23 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
        mlx5_wq_ll_update_db_record(wq);
 }
 
-static int mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq,
-                                      struct mlx5e_rx_wqe *wqe,
-                                      u16 ix)
-{
-       struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
-       gfp_t gfp_mask;
-       int i;
-
-       gfp_mask = GFP_ATOMIC | __GFP_COLD | __GFP_MEMALLOC;
-       wi->dma_info.page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
-                                            MLX5_MPWRQ_WQE_PAGE_ORDER);
-       if (unlikely(!wi->dma_info.page))
-               return -ENOMEM;
-
-       wi->dma_info.addr = dma_map_page(rq->pdev, wi->dma_info.page, 0,
-                                        rq->wqe_sz, PCI_DMA_FROMDEVICE);
-       if (unlikely(dma_mapping_error(rq->pdev, wi->dma_info.addr))) {
-               put_page(wi->dma_info.page);
-               return -ENOMEM;
-       }
-
-       /* We split the high-order page into order-0 ones and manage their
-        * reference counter to minimize the memory held by small skb fragments
-        */
-       split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
-       for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
-               page_ref_add(&wi->dma_info.page[i],
-                            mlx5e_mpwqe_strides_per_page(rq));
-               wi->skbs_frags[i] = 0;
-       }
-
-       wi->consumed_strides = 0;
-       wi->dma_pre_sync = mlx5e_dma_pre_sync_linear_mpwqe;
-       wi->add_skb_frag = mlx5e_add_skb_frag_linear_mpwqe;
-       wi->copy_skb_header = mlx5e_copy_skb_header_linear_mpwqe;
-       wi->free_wqe     = mlx5e_free_rx_linear_mpwqe;
-       wqe->data.lkey = rq->mkey_be;
-       wqe->data.addr = cpu_to_be64(wi->dma_info.addr);
-
-       return 0;
-}
-
-void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
-                               struct mlx5e_mpw_info *wi)
-{
-       int i;
-
-       dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
-                      PCI_DMA_FROMDEVICE);
-       for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
-               page_ref_sub(&wi->dma_info.page[i],
-                       mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
-               put_page(&wi->dma_info.page[i]);
-       }
-}
-
-int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
 {
        int err;
 
-       err = mlx5e_alloc_rx_linear_mpwqe(rq, wqe, ix);
-       if (unlikely(err)) {
-               err = mlx5e_alloc_rx_fragmented_mpwqe(rq, wqe, ix);
-               if (unlikely(err))
-                       return err;
-               set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
-               mlx5e_post_umr_wqe(rq, ix);
-               return -EBUSY;
-       }
-
-       return 0;
+       err = mlx5e_alloc_rx_umr_mpwqe(rq, wqe, ix);
+       if (unlikely(err))
+               return err;
+       set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+       mlx5e_post_umr_wqe(rq, ix);
+       return -EBUSY;
 }
 
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 {
        struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
 
-       wi->free_wqe(rq, wi);
+       mlx5e_free_rx_mpwqe(rq, wi);
 }
 
 #define RQ_CANNOT_POST(rq) \
@@ -617,9 +483,10 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
                int err;
 
                err = rq->alloc_wqe(rq, wqe, wq->head);
+               if (err == -EBUSY)
+                       return true;
                if (unlikely(err)) {
-                       if (err != -EBUSY)
-                               rq->stats.buff_alloc_err++;
+                       rq->stats.buff_alloc_err++;
                        break;
                }
 
@@ -637,24 +504,32 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
                                 u32 cqe_bcnt)
 {
-       struct ethhdr   *eth    = (struct ethhdr *)(skb->data);
-       struct iphdr    *ipv4   = (struct iphdr *)(skb->data + ETH_HLEN);
-       struct ipv6hdr  *ipv6   = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+       struct ethhdr   *eth = (struct ethhdr *)(skb->data);
+       struct iphdr    *ipv4;
+       struct ipv6hdr  *ipv6;
        struct tcphdr   *tcp;
+       int network_depth = 0;
+       __be16 proto;
+       u16 tot_len;
 
        u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
        int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA  == l4_hdr_type) ||
                       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
 
-       u16 tot_len = cqe_bcnt - ETH_HLEN;
+       skb->mac_len = ETH_HLEN;
+       proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+
+       ipv4 = (struct iphdr *)(skb->data + network_depth);
+       ipv6 = (struct ipv6hdr *)(skb->data + network_depth);
+       tot_len = cqe_bcnt - network_depth;
 
-       if (eth->h_proto == htons(ETH_P_IP)) {
-               tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+       if (proto == htons(ETH_P_IP)) {
+               tcp = (struct tcphdr *)(skb->data + network_depth +
                                        sizeof(struct iphdr));
                ipv6 = NULL;
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
        } else {
-               tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+               tcp = (struct tcphdr *)(skb->data + network_depth +
                                        sizeof(struct ipv6hdr));
                ipv4 = NULL;
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
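/* Editorial note, not from the patch: skb->mac_len is set to ETH_HLEN before
 * calling __vlan_get_protocol() so the helper can walk any VLAN tags and
 * report the offset of the encapsulated network header in network_depth.  The
 * LRO header rewrite then uses network_depth instead of a hard-coded ETH_HLEN,
 * keeping the IP/TCP pointers and tot_len correct for VLAN-tagged traffic.
 */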
@@ -823,7 +698,6 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
                                           u32 cqe_bcnt,
                                           struct sk_buff *skb)
 {
-       u32 consumed_bytes = ALIGN(cqe_bcnt, rq->mpwqe_stride_sz);
        u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
        u32 wqe_offset     = stride_ix * rq->mpwqe_stride_sz;
        u32 head_offset    = wqe_offset & (PAGE_SIZE - 1);
@@ -837,21 +711,20 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
                page_idx++;
                frag_offset -= PAGE_SIZE;
        }
-       wi->dma_pre_sync(rq->pdev, wi, wqe_offset, consumed_bytes);
 
        while (byte_cnt) {
                u32 pg_consumed_bytes =
                        min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
 
-               wi->add_skb_frag(rq, skb, wi, page_idx, frag_offset,
-                                pg_consumed_bytes);
+               mlx5e_add_skb_frag_mpwqe(rq, skb, wi, page_idx, frag_offset,
+                                        pg_consumed_bytes);
                byte_cnt -= pg_consumed_bytes;
                frag_offset = 0;
                page_idx++;
        }
        /* copy header */
-       wi->copy_skb_header(rq->pdev, skb, wi, head_page_idx, head_offset,
-                           headlen);
+       mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, wi, head_page_idx,
+                                   head_offset, headlen);
        /* skb linear part was allocated with headlen and aligned to long */
        skb->tail += headlen;
        skb->len  += headlen;
@@ -896,7 +769,7 @@ mpwrq_cqe_out:
        if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
                return;
 
-       wi->free_wqe(rq, wi);
+       mlx5e_free_rx_mpwqe(rq, wi);
        mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
 }
 
index 499487c..6af8d79 100644 (file)
@@ -73,10 +73,13 @@ struct mlx5e_sw_stats {
        u64 tx_xmit_more;
        u64 rx_wqe_err;
        u64 rx_mpwqe_filler;
-       u64 rx_mpwqe_frag;
        u64 rx_buff_alloc_err;
        u64 rx_cqe_compress_blks;
        u64 rx_cqe_compress_pkts;
+       u64 rx_cache_reuse;
+       u64 rx_cache_full;
+       u64 rx_cache_empty;
+       u64 rx_cache_busy;
 
        /* Special handling counters */
        u64 link_down_events_phy;
@@ -105,10 +108,13 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
 };
 
@@ -274,10 +280,13 @@ struct mlx5e_rq_stats {
        u64 lro_bytes;
        u64 wqe_err;
        u64 mpwqe_filler;
-       u64 mpwqe_frag;
        u64 buff_alloc_err;
        u64 cqe_compress_blks;
        u64 cqe_compress_pkts;
+       u64 cache_reuse;
+       u64 cache_full;
+       u64 cache_empty;
+       u64 cache_busy;
 };
 
 static const struct counter_desc rq_stats_desc[] = {
@@ -290,10 +299,13 @@ static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
-       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
 };
 
 struct mlx5e_sq_stats {
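
The four new rx_cache_* counters track the outcomes of a per-ring page-recycle cache apparently introduced elsewhere in this series (only the counters are visible here): reuse (a cached page served a new RX buffer), empty (nothing cached), busy (the candidate page was still referenced) and full (a returned page could not be stored). A self-contained sketch of such a cache — the names, size and exact policy below are illustrative assumptions, not the driver's code:

#include <linux/kernel.h>
#include <linux/mm.h>

struct example_rq_stats {
	u64 cache_reuse, cache_full, cache_empty, cache_busy;
};

struct example_page_cache {
	unsigned int count;
	struct page *pages[64];
};

static struct page *example_cache_get(struct example_page_cache *c,
				      struct example_rq_stats *stats)
{
	struct page *page;

	if (!c->count) {
		stats->cache_empty++;
		return NULL;
	}
	page = c->pages[c->count - 1];
	if (page_ref_count(page) != 1) {
		stats->cache_busy++;	/* still held elsewhere, skip it */
		return NULL;
	}
	stats->cache_reuse++;
	c->count--;
	return page;
}

static bool example_cache_put(struct example_page_cache *c, struct page *page,
			      struct example_rq_stats *stats)
{
	if (c->count == ARRAY_SIZE(c->pages)) {
		stats->cache_full++;
		return false;		/* caller frees the page instead */
	}
	c->pages[c->count++] = page;
	return true;
}
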
index 988eca9..eb0e725 100644 (file)
@@ -356,6 +356,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                sq->stats.stopped++;
        }
 
+       sq->stats.xmit_more += skb->xmit_more;
        if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
                int bf_sz = 0;
 
@@ -375,7 +376,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 
        sq->stats.packets++;
        sq->stats.bytes += num_bytes;
-       sq->stats.xmit_more += skb->xmit_more;
        return NETDEV_TX_OK;
 
 dma_unmap_wqe_err:
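
Bumping xmit_more before the doorbell (rather than after, as the removed line did) is most likely about object lifetime: once the hardware has been notified, a completion running on another CPU may already free the skb, so any skb field the driver still needs must be read first. A hedged sketch of that ordering rule — the queue type and the two helpers are hypothetical:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_sq {
	struct netdev_queue *txq;
	struct { u64 xmit_more; } stats;
};

void example_post_descriptor(struct example_sq *sq, struct sk_buff *skb);
void example_ring_doorbell(struct example_sq *sq);

static netdev_tx_t example_xmit(struct example_sq *sq, struct sk_buff *skb)
{
	bool more = skb->xmit_more;	/* read while the skb is still ours */

	sq->stats.xmit_more += more;

	example_post_descriptor(sq, skb);

	if (!more || netif_xmit_stopped(sq->txq))
		example_ring_doorbell(sq);	/* after this point a racing
						 * completion may free the skb */
	return NETDEV_TX_OK;
}
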
index 9bf33bb..08d8b0c 100644 (file)
@@ -87,7 +87,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
                case MLX5_OPCODE_NOP:
                        break;
                case MLX5_OPCODE_UMR:
-                       mlx5e_post_rx_fragmented_mpwqe(&sq->channel->rq);
+                       mlx5e_post_rx_mpwqe(&sq->channel->rq);
                        break;
                default:
                        WARN_ONCE(true,
index b83d0a7..4e2354c 100644 (file)
@@ -2138,6 +2138,18 @@ MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
  */
 MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
 
+enum {
+       MLXSW_REG_PTYS_AN_STATUS_NA,
+       MLXSW_REG_PTYS_AN_STATUS_OK,
+       MLXSW_REG_PTYS_AN_STATUS_FAIL,
+};
+
+/* reg_ptys_an_status
+ * Autonegotiation status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
+
 #define MLXSW_REG_PTYS_ETH_SPEED_SGMII                 BIT(0)
 #define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX           BIT(1)
 #define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4           BIT(2)
@@ -2152,6 +2164,7 @@ MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
 #define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR         BIT(14)
 #define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4           BIT(15)
 #define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4       BIT(16)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2           BIT(18)
 #define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4           BIT(19)
 #define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4          BIT(20)
 #define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4          BIT(21)
@@ -2184,6 +2197,13 @@ MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
  */
 MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
 
+/* reg_ptys_eth_proto_lp_advertise
+ * The protocols that were advertised by the link partner during
+ * autonegotiation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_lp_advertise, 0x30, 0, 32);
+
 static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
                                       u32 proto_admin)
 {
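
MLXSW_ITEM32() generates typed accessors for the two new PTYS fields, which the Spectrum code further down reads as mlxsw_reg_ptys_an_status_get() and mlxsw_reg_ptys_eth_proto_lp_advertise_get(). A short sketch of querying the register and extracting the link-partner advertisement (wrapper name and error policy are illustrative; the mlxsw_reg_* helpers and constants are the ones defined in this patch set):

/* Assumes the mlxsw "core.h"/"reg.h" headers. */
static int example_read_lp_advertise(struct mlxsw_core *core, u8 local_port,
				     u32 *lp_proto)
{
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u8 an_status;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, local_port, 0);
	err = mlxsw_reg_query(core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	an_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
	if (an_status != MLXSW_REG_PTYS_AN_STATUS_OK)
		return -ENODATA;	/* autoneg did not complete */

	*lp_proto = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
	return 0;
}
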
index 6c6b726..27bbcaf 100644 (file)
@@ -56,6 +56,7 @@
 #include <generated/utsrelease.h>
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_mirred.h>
+#include <net/netevent.h>
 
 #include "spectrum.h"
 #include "core.h"
@@ -1598,112 +1599,149 @@ static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
 }
 
 struct mlxsw_sp_port_link_mode {
+       enum ethtool_link_mode_bit_indices mask_ethtool;
        u32 mask;
-       u32 supported;
-       u32 advertised;
        u32 speed;
 };
 
 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
-               .supported      = SUPPORTED_100baseT_Full,
-               .advertised     = ADVERTISED_100baseT_Full,
-               .speed          = 100,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
-               .speed          = 100,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+               .speed          = SPEED_100,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
                                  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
-               .supported      = SUPPORTED_1000baseKX_Full,
-               .advertised     = ADVERTISED_1000baseKX_Full,
-               .speed          = 1000,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+               .speed          = SPEED_1000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
-               .supported      = SUPPORTED_10000baseT_Full,
-               .advertised     = ADVERTISED_10000baseT_Full,
-               .speed          = 10000,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+               .speed          = SPEED_10000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
-               .supported      = SUPPORTED_10000baseKX4_Full,
-               .advertised     = ADVERTISED_10000baseKX4_Full,
-               .speed          = 10000,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+               .speed          = SPEED_10000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
                                  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
-               .supported      = SUPPORTED_10000baseKR_Full,
-               .advertised     = ADVERTISED_10000baseKR_Full,
-               .speed          = 10000,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+               .speed          = SPEED_10000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
-               .supported      = SUPPORTED_20000baseKR2_Full,
-               .advertised     = ADVERTISED_20000baseKR2_Full,
-               .speed          = 20000,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+               .speed          = SPEED_20000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
-               .supported      = SUPPORTED_40000baseCR4_Full,
-               .advertised     = ADVERTISED_40000baseCR4_Full,
-               .speed          = 40000,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+               .speed          = SPEED_40000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
-               .supported      = SUPPORTED_40000baseKR4_Full,
-               .advertised     = ADVERTISED_40000baseKR4_Full,
-               .speed          = 40000,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+               .speed          = SPEED_40000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
-               .supported      = SUPPORTED_40000baseSR4_Full,
-               .advertised     = ADVERTISED_40000baseSR4_Full,
-               .speed          = 40000,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+               .speed          = SPEED_40000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
-               .supported      = SUPPORTED_40000baseLR4_Full,
-               .advertised     = ADVERTISED_40000baseLR4_Full,
-               .speed          = 40000,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+               .speed          = SPEED_40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+               .speed          = SPEED_25000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+               .speed          = SPEED_25000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+               .speed          = SPEED_25000,
        },
        {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
-                                 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
-                                 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
-               .speed          = 25000,
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+               .speed          = SPEED_25000,
        },
        {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
-                                 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
-                                 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
-               .speed          = 50000,
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+               .speed          = SPEED_50000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+               .speed          = SPEED_50000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+               .speed          = SPEED_50000,
        },
        {
                .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
-               .supported      = SUPPORTED_56000baseKR4_Full,
-               .advertised     = ADVERTISED_56000baseKR4_Full,
-               .speed          = 56000,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
+               .speed          = SPEED_56000,
        },
        {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
-                                 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
-                                 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
-                                 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
-               .speed          = 100000,
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
+               .speed          = SPEED_56000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
+               .speed          = SPEED_56000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+               .speed          = SPEED_56000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+               .speed          = SPEED_100000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+               .speed          = SPEED_100000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+               .speed          = SPEED_100000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+               .speed          = SPEED_100000,
        },
 };
 
 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
 
-static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
+static void
+mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
+                                 struct ethtool_link_ksettings *cmd)
 {
        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
                              MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
@@ -1711,43 +1749,29 @@ static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
                              MLXSW_REG_PTYS_ETH_SPEED_SGMII))
-               return SUPPORTED_FIBRE;
+               ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
 
        if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
                              MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
                              MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
                              MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
                              MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
-               return SUPPORTED_Backplane;
-       return 0;
-}
-
-static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
-{
-       u32 modes = 0;
-       int i;
-
-       for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
-               if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
-                       modes |= mlxsw_sp_port_link_mode[i].supported;
-       }
-       return modes;
+               ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
 }
 
-static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
+static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
 {
-       u32 modes = 0;
        int i;
 
        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
                if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
-                       modes |= mlxsw_sp_port_link_mode[i].advertised;
+                       __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
+                                 mode);
        }
-       return modes;
 }
 
 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
-                                           struct ethtool_cmd *cmd)
+                                           struct ethtool_link_ksettings *cmd)
 {
        u32 speed = SPEED_UNKNOWN;
        u8 duplex = DUPLEX_UNKNOWN;
@@ -1764,8 +1788,8 @@ static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
                }
        }
 out:
-       ethtool_cmd_speed_set(cmd, speed);
-       cmd->duplex = duplex;
+       cmd->base.speed = speed;
+       cmd->base.duplex = duplex;
 }
 
 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
@@ -1790,49 +1814,15 @@ static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
        return PORT_OTHER;
 }
 
-static int mlxsw_sp_port_get_settings(struct net_device *dev,
-                                     struct ethtool_cmd *cmd)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       char ptys_pl[MLXSW_REG_PTYS_LEN];
-       u32 eth_proto_cap;
-       u32 eth_proto_admin;
-       u32 eth_proto_oper;
-       int err;
-
-       mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
-       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
-       if (err) {
-               netdev_err(dev, "Failed to get proto");
-               return err;
-       }
-       mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
-                             &eth_proto_admin, &eth_proto_oper);
-
-       cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
-                        mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
-                        SUPPORTED_Pause | SUPPORTED_Asym_Pause |
-                        SUPPORTED_Autoneg;
-       cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
-       mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
-                                       eth_proto_oper, cmd);
-
-       eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
-       cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
-       cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
-
-       cmd->transceiver = XCVR_INTERNAL;
-       return 0;
-}
-
-static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
+static u32
+mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
 {
        u32 ptys_proto = 0;
        int i;
 
        for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
-               if (advertising & mlxsw_sp_port_link_mode[i].advertised)
+               if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
+                            cmd->link_modes.advertising))
                        ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
        }
        return ptys_proto;
@@ -1862,61 +1852,113 @@ static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
        return ptys_proto;
 }
 
-static int mlxsw_sp_port_set_settings(struct net_device *dev,
-                                     struct ethtool_cmd *cmd)
+static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
+                                            struct ethtool_link_ksettings *cmd)
+{
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+
+       mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
+       mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
+}
+
+static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
+                                            struct ethtool_link_ksettings *cmd)
+{
+       if (!autoneg)
+               return;
+
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+       mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
+}
+
+static void
+mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
+                                   struct ethtool_link_ksettings *cmd)
+{
+       if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
+               return;
+
+       ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
+       mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
+}
+
+static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
+                                           struct ethtool_link_ksettings *cmd)
 {
+       u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char ptys_pl[MLXSW_REG_PTYS_LEN];
-       u32 speed;
-       u32 eth_proto_new;
-       u32 eth_proto_cap;
-       u32 eth_proto_admin;
+       u8 autoneg_status;
+       bool autoneg;
        int err;
 
-       speed = ethtool_cmd_speed(cmd);
+       autoneg = mlxsw_sp_port->link.autoneg;
+       mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+       if (err)
+               return err;
+       mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
+                             &eth_proto_oper);
+
+       mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
 
-       eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
-               mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
-               mlxsw_sp_to_ptys_speed(speed);
+       mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
+
+       eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
+       autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
+       mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
+
+       cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+       cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
+       mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
+                                       cmd);
+
+       return 0;
+}
+
+static int
+mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
+                                const struct ethtool_link_ksettings *cmd)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char ptys_pl[MLXSW_REG_PTYS_LEN];
+       u32 eth_proto_cap, eth_proto_new;
+       bool autoneg;
+       int err;
 
        mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
-       if (err) {
-               netdev_err(dev, "Failed to get proto");
+       if (err)
                return err;
-       }
-       mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+       mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
+
+       autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
+       eth_proto_new = autoneg ?
+               mlxsw_sp_to_ptys_advert_link(cmd) :
+               mlxsw_sp_to_ptys_speed(cmd->base.speed);
 
        eth_proto_new = eth_proto_new & eth_proto_cap;
        if (!eth_proto_new) {
-               netdev_err(dev, "Not supported proto admin requested");
+               netdev_err(dev, "No supported speed requested\n");
                return -EINVAL;
        }
-       if (eth_proto_new == eth_proto_admin)
-               return 0;
 
        mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
-       if (err) {
-               netdev_err(dev, "Failed to set proto admin");
+       if (err)
                return err;
-       }
 
        if (!netif_running(dev))
                return 0;
 
-       err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
-       if (err) {
-               netdev_err(dev, "Failed to set admin status");
-               return err;
-       }
+       mlxsw_sp_port->link.autoneg = autoneg;
 
-       err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
-       if (err) {
-               netdev_err(dev, "Failed to set admin status");
-               return err;
-       }
+       mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+       mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
 
        return 0;
 }
@@ -1930,8 +1972,8 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
        .set_phys_id            = mlxsw_sp_port_set_phys_id,
        .get_ethtool_stats      = mlxsw_sp_port_get_stats,
        .get_sset_count         = mlxsw_sp_port_get_sset_count,
-       .get_settings           = mlxsw_sp_port_get_settings,
-       .set_settings           = mlxsw_sp_port_set_settings,
+       .get_link_ksettings     = mlxsw_sp_port_get_link_ksettings,
+       .set_link_ksettings     = mlxsw_sp_port_set_link_ksettings,
 };
 
 static int
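
The hunks above convert the driver from the legacy get_settings/set_settings ethtool hooks to the link_ksettings pair, which carries link modes in arbitrarily wide bitmaps instead of the 32-bit SUPPORTED_*/ADVERTISED_* masks — the reason the 25/50/100G modes finally get their own entries in the table. A minimal, hedged skeleton of such a handler, with dummy values rather than the mlxsw logic:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);

	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 40000baseSR4_Full);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, 40000baseSR4_Full);

	cmd->base.speed   = SPEED_40000;
	cmd->base.duplex  = DUPLEX_FULL;
	cmd->base.autoneg = AUTONEG_ENABLE;
	cmd->base.port    = PORT_FIBRE;
	return 0;
}
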
@@ -2081,6 +2123,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
        mlxsw_sp_port->mapping.module = module;
        mlxsw_sp_port->mapping.width = width;
        mlxsw_sp_port->mapping.lane = lane;
+       mlxsw_sp_port->link.autoneg = 1;
        bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
        mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
        if (!mlxsw_sp_port->active_vlans) {
@@ -2105,6 +2148,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
        dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
        dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
 
+       err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+                       mlxsw_sp_port->local_port);
+               goto err_port_swid_set;
+       }
+
        err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
@@ -2130,13 +2180,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                goto err_port_system_port_mapping_set;
        }
 
-       err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
-       if (err) {
-               dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
-                       mlxsw_sp_port->local_port);
-               goto err_port_swid_set;
-       }
-
        err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
@@ -2218,10 +2261,10 @@ err_port_buffers_init:
 err_port_admin_status_set:
 err_port_mtu_set:
 err_port_speed_by_width_set:
-       mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
-err_port_swid_set:
 err_port_system_port_mapping_set:
 err_dev_addr_init:
+       mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
+err_port_swid_set:
        free_percpu(mlxsw_sp_port->pcpu_stats);
 err_alloc_stats:
        kfree(mlxsw_sp_port->untagged_vlans);
@@ -4465,18 +4508,26 @@ static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
        .priority = 10, /* Must be called before FIB notifier block */
 };
 
+static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
+       .notifier_call = mlxsw_sp_router_netevent_event,
+};
+
 static int __init mlxsw_sp_module_init(void)
 {
        int err;
 
        register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
        register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
+       register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+
        err = mlxsw_core_driver_register(&mlxsw_sp_driver);
        if (err)
                goto err_core_driver_register;
        return 0;
 
 err_core_driver_register:
+       unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+       unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
        unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
        return err;
 }
@@ -4484,6 +4535,7 @@ err_core_driver_register:
 static void __exit mlxsw_sp_module_exit(void)
 {
        mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+       unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
        unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
        unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
 }
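
The netevent notifier is also moved from per-ASIC init in the router code (see the spectrum_router.c hunks below) to module load/unload, and both the failure path and module exit unwind the registrations in reverse order of registration. The idiom, reduced to a skeleton with illustrative names:

#include <linux/init.h>
#include <linux/inetdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/netevent.h>

static struct notifier_block example_netdevice_nb, example_inetaddr_nb,
			     example_netevent_nb;
int example_driver_register(void);	/* hypothetical */

static int __init example_init(void)
{
	int err;

	register_netdevice_notifier(&example_netdevice_nb);
	register_inetaddr_notifier(&example_inetaddr_nb);
	register_netevent_notifier(&example_netevent_nb);

	err = example_driver_register();
	if (err)
		goto err_driver_register;
	return 0;

err_driver_register:
	/* tear down in reverse, so nothing is removed while something
	 * registered later could still depend on it */
	unregister_netevent_notifier(&example_netevent_nb);
	unregister_inetaddr_notifier(&example_inetaddr_nb);
	unregister_netdevice_notifier(&example_netdevice_nb);
	return err;
}
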
index 01537d3..969c250 100644 (file)
@@ -341,7 +341,8 @@ struct mlxsw_sp_port {
        } vport;
        struct {
                u8 tx_pause:1,
-                  rx_pause:1;
+                  rx_pause:1,
+                  autoneg:1;
        } link;
        struct {
                struct ieee_ets *ets;
@@ -590,6 +591,8 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
                                    struct neighbour *n);
 void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
                                   struct neighbour *n);
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+                                  unsigned long event, void *ptr);
 
 int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
 void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
index 917ddd1..3f5c51d 100644 (file)
@@ -107,6 +107,7 @@ mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
 }
 
 struct mlxsw_sp_fib_key {
+       struct net_device *dev;
        unsigned char addr[sizeof(struct in6_addr)];
        unsigned char prefix_len;
 };
@@ -123,7 +124,7 @@ struct mlxsw_sp_fib_entry {
        struct rhash_head ht_node;
        struct mlxsw_sp_fib_key key;
        enum mlxsw_sp_fib_entry_type type;
-       u8 added:1;
+       unsigned int ref_count;
        u16 rif; /* used for action local */
        struct mlxsw_sp_vr *vr;
        struct list_head nexthop_group_node;
@@ -171,13 +172,15 @@ static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
 
 static struct mlxsw_sp_fib_entry *
 mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
-                         size_t addr_len, unsigned char prefix_len)
+                         size_t addr_len, unsigned char prefix_len,
+                         struct net_device *dev)
 {
        struct mlxsw_sp_fib_entry *fib_entry;
 
        fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
        if (!fib_entry)
                return NULL;
+       fib_entry->key.dev = dev;
        memcpy(fib_entry->key.addr, addr, addr_len);
        fib_entry->key.prefix_len = prefix_len;
        return fib_entry;
@@ -190,10 +193,13 @@ static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
 
 static struct mlxsw_sp_fib_entry *
 mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
-                         size_t addr_len, unsigned char prefix_len)
+                         size_t addr_len, unsigned char prefix_len,
+                         struct net_device *dev)
 {
-       struct mlxsw_sp_fib_key key = {{ 0 } };
+       struct mlxsw_sp_fib_key key;
 
+       memset(&key, 0, sizeof(key));
+       key.dev = dev;
        memcpy(key.addr, addr, addr_len);
        key.prefix_len = prefix_len;
        return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
@@ -938,8 +944,8 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
        mlxsw_sp_port_dev_put(mlxsw_sp_port);
 }
 
-static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
-                                         unsigned long event, void *ptr)
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+                                  unsigned long event, void *ptr)
 {
        struct mlxsw_sp_neigh_entry *neigh_entry;
        struct mlxsw_sp_port *mlxsw_sp_port;
@@ -1009,10 +1015,6 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
        return NOTIFY_DONE;
 }
 
-static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
-       .notifier_call = mlxsw_sp_router_netevent_event,
-};
-
 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
 {
        int err;
@@ -1027,10 +1029,6 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
         */
        mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
 
-       err = register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
-       if (err)
-               goto err_register_netevent_notifier;
-
        /* Create the delayed works for the activity_update */
        INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
                          mlxsw_sp_router_neighs_update_work);
@@ -1039,17 +1037,12 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
        mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
        mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
        return 0;
-
-err_register_netevent_notifier:
-       rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
-       return err;
 }
 
 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
 {
        cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
        cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
-       unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
        rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
 }
 
@@ -1524,7 +1517,14 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
                return err;
        mlxsw_sp_lpm_init(mlxsw_sp);
        mlxsw_sp_vrs_init(mlxsw_sp);
-       return mlxsw_sp_neigh_init(mlxsw_sp);
+       err = mlxsw_sp_neigh_init(mlxsw_sp);
+       if (err)
+               goto err_neigh_init;
+       return 0;
+
+err_neigh_init:
+       __mlxsw_sp_router_fini(mlxsw_sp);
+       return err;
 }
 
 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
@@ -1626,11 +1626,8 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_fib_entry *fib_entry)
 {
-       enum mlxsw_reg_ralue_op op;
-
-       op = !fib_entry->added ? MLXSW_REG_RALUE_OP_WRITE_WRITE :
-                                MLXSW_REG_RALUE_OP_WRITE_UPDATE;
-       return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
+       return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+                                    MLXSW_REG_RALUE_OP_WRITE_WRITE);
 }
 
 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
@@ -1695,34 +1692,93 @@ mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
        mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
 }
 
-static int
-mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
-                                const struct switchdev_obj_ipv4_fib *fib4,
-                                struct switchdev_trans *trans)
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
+                      const struct switchdev_obj_ipv4_fib *fib4)
 {
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       struct mlxsw_sp_router_fib4_add_info *info;
        struct mlxsw_sp_fib_entry *fib_entry;
+       struct fib_info *fi = fib4->fi;
        struct mlxsw_sp_vr *vr;
        int err;
 
        vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
                             MLXSW_SP_L3_PROTO_IPV4);
        if (IS_ERR(vr))
-               return PTR_ERR(vr);
+               return ERR_CAST(vr);
 
+       fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+                                             sizeof(fib4->dst),
+                                             fib4->dst_len, fi->fib_dev);
+       if (fib_entry) {
+               /* Already exists, just take a reference */
+               fib_entry->ref_count++;
+               return fib_entry;
+       }
        fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
-                                             sizeof(fib4->dst), fib4->dst_len);
+                                             sizeof(fib4->dst),
+                                             fib4->dst_len, fi->fib_dev);
        if (!fib_entry) {
                err = -ENOMEM;
                goto err_fib_entry_create;
        }
        fib_entry->vr = vr;
+       fib_entry->ref_count = 1;
 
        err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
        if (err)
                goto err_fib4_entry_init;
 
+       return fib_entry;
+
+err_fib4_entry_init:
+       mlxsw_sp_fib_entry_destroy(fib_entry);
+err_fib_entry_create:
+       mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+       return ERR_PTR(err);
+}
+
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
+                       const struct switchdev_obj_ipv4_fib *fib4)
+{
+       struct mlxsw_sp_vr *vr;
+
+       vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
+       if (!vr)
+               return NULL;
+
+       return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+                                        sizeof(fib4->dst), fib4->dst_len,
+                                        fib4->fi->fib_dev);
+}
+
+void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
+                           struct mlxsw_sp_fib_entry *fib_entry)
+{
+       struct mlxsw_sp_vr *vr = fib_entry->vr;
+
+       if (--fib_entry->ref_count == 0) {
+               mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
+               mlxsw_sp_fib_entry_destroy(fib_entry);
+       }
+       mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
+
+static int
+mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
+                                const struct switchdev_obj_ipv4_fib *fib4,
+                                struct switchdev_trans *trans)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       struct mlxsw_sp_router_fib4_add_info *info;
+       struct mlxsw_sp_fib_entry *fib_entry;
+       int err;
+
+       fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4);
+       if (IS_ERR(fib_entry))
+               return PTR_ERR(fib_entry);
+
        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                err = -ENOMEM;
@@ -1736,11 +1792,7 @@ mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
        return 0;
 
 err_alloc_info:
-       mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
-err_fib4_entry_init:
-       mlxsw_sp_fib_entry_destroy(fib_entry);
-err_fib_entry_create:
-       mlxsw_sp_vr_put(mlxsw_sp, vr);
+       mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
        return err;
 }
 
@@ -1759,11 +1811,14 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
        fib_entry = info->fib_entry;
        kfree(info);
 
+       if (fib_entry->ref_count != 1)
+               return 0;
+
        vr = fib_entry->vr;
-       err = mlxsw_sp_fib_entry_insert(fib_entry->vr->fib, fib_entry);
+       err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
        if (err)
                goto err_fib_entry_insert;
-       err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+       err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry);
        if (err)
                goto err_fib_entry_add;
        return 0;
@@ -1771,9 +1826,7 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
 err_fib_entry_add:
        mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
 err_fib_entry_insert:
-       mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
-       mlxsw_sp_fib_entry_destroy(fib_entry);
-       mlxsw_sp_vr_put(mlxsw_sp, vr);
+       mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
        return err;
 }
 
@@ -1793,23 +1846,18 @@ int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_fib_entry *fib_entry;
-       struct mlxsw_sp_vr *vr;
 
-       vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
-       if (!vr) {
-               dev_warn(mlxsw_sp->bus_info->dev, "Failed to find virtual router for FIB4 entry being removed.\n");
-               return -ENOENT;
-       }
-       fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
-                                             sizeof(fib4->dst), fib4->dst_len);
+       fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4);
        if (!fib_entry) {
                dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
                return -ENOENT;
        }
-       mlxsw_sp_fib_entry_del(mlxsw_sp_port->mlxsw_sp, fib_entry);
-       mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
-       mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
-       mlxsw_sp_fib_entry_destroy(fib_entry);
-       mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+       if (fib_entry->ref_count == 1) {
+               mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+               mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
+       }
+
+       mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
        return 0;
 }
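
The router now keys FIB entries by (prefix, prefix length, device) and reference-counts them, so identical IPv4 routes resolved through the same netdev share a single hardware entry: _get() bumps ref_count on an existing entry or creates a new one, and _put() only removes and frees the entry once the last user drops it. The bare idiom, with hypothetical types and no locking shown:

#include <linux/err.h>
#include <linux/slab.h>

struct example_table;
struct example_key;

struct example_entry {
	unsigned int ref_count;
	/* key, hardware state, ... */
};

struct example_entry *example_entry_lookup(struct example_table *tbl,
					   const struct example_key *key);

static struct example_entry *example_entry_get(struct example_table *tbl,
					       const struct example_key *key)
{
	struct example_entry *e = example_entry_lookup(tbl, key);

	if (e) {
		e->ref_count++;			/* share the existing entry */
		return e;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return ERR_PTR(-ENOMEM);
	e->ref_count = 1;
	/* ... program the hardware and insert into the table ... */
	return e;
}

static void example_entry_put(struct example_table *tbl,
			      struct example_entry *e)
{
	if (--e->ref_count != 0)
		return;
	/* ... remove from the table and clear the hardware state ... */
	kfree(e);
}
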
index 0c3fbbc..7186c48 100644 (file)
@@ -167,8 +167,8 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
 }
 
 static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
-                                    u16 idx_begin, u16 idx_end, bool set,
-                                    bool only_uc)
+                                    u16 idx_begin, u16 idx_end, bool uc_set,
+                                    bool bm_set)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 local_port = mlxsw_sp_port->local_port;
@@ -187,28 +187,22 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
                return -ENOMEM;
 
        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
-                           table_type, range, local_port, set);
+                           table_type, range, local_port, uc_set);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
        if (err)
                goto buffer_out;
 
-       /* Flooding control allows one to decide whether a given port will
-        * flood unicast traffic for which there is no FDB entry.
-        */
-       if (only_uc)
-               goto buffer_out;
-
        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
-                           table_type, range, local_port, set);
+                           table_type, range, local_port, bm_set);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
        if (err)
                goto err_flood_bm_set;
-       else
-               goto buffer_out;
+
+       goto buffer_out;
 
 err_flood_bm_set:
        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
-                           table_type, range, local_port, !set);
+                           table_type, range, local_port, !uc_set);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
 buffer_out:
        kfree(sftr_pl);
@@ -257,8 +251,7 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
         * the start of the vFIDs range.
         */
        vfid = mlxsw_sp_fid_to_vfid(fid);
-       return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
-                                        false);
+       return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
 }
 
 static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -501,6 +494,9 @@ static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
 {
        struct mlxsw_sp_fid *f;
 
+       if (test_bit(fid, mlxsw_sp_port->active_vlans))
+               return 0;
+
        f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
        if (!f) {
                f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
@@ -558,7 +554,7 @@ static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
        }
 
        err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
-                                       true, false);
+                                       mlxsw_sp_port->uc_flood, true);
        if (err)
                goto err_port_flood_set;
 
index eb807b0..569ade6 100644 (file)
@@ -134,7 +134,7 @@ static int lnksts = 0;              /* CFG_LNKSTS bit polarity */
 
 /* tunables */
 #define RX_BUF_SIZE    1500    /* 8192 */
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
 #define NS83820_VLAN_ACCEL_SUPPORT
 #endif
 
index 88678c1..252e492 100644 (file)
@@ -41,7 +41,6 @@
  *          Chris Telfer <chris.telfer@netronome.com>
  */
 
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -1441,10 +1440,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 
                nfp_net_set_hash(nn->netdev, skb, rxd);
 
-               /* Pad small frames to minimum */
-               if (skb_put_padto(skb, 60))
-                       break;
-
                /* Stats update */
                u64_stats_update_begin(&r_vec->rx_sync);
                r_vec->rx_pkts++;
index 7d7933d..4c98972 100644 (file)
@@ -40,7 +40,6 @@
  *          Brad Petrus <brad.petrus@netronome.com>
  */
 
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
index 37abef0..f7062cb 100644 (file)
@@ -38,7 +38,6 @@
  *         Rolf Neugebauer <rolf.neugebauer@netronome.com>
  */
 
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -134,7 +133,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
        }
 
        nfp_net_get_fw_version(&fw_ver, ctrl_bar);
-       if (fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+       if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
                dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
                        fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
                err = -EINVAL;
@@ -142,9 +141,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
        }
 
        /* Determine stride */
-       if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 0) ||
-           nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1) ||
-           nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0x12, 0x48)) {
+       if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
                stride = 2;
                tx_bar_no = NFP_NET_Q0_BAR;
                rx_bar_no = NFP_NET_Q1_BAR;
index 12c399b..130da1c 100644 (file)
@@ -19,6 +19,7 @@
 #include "qed_dcbx.h"
 #include "qed_hsi.h"
 #include "qed_sp.h"
+#include "qed_sriov.h"
 #ifdef CONFIG_DCB
 #include <linux/qed/qed_eth_if.h>
 #endif
@@ -942,6 +943,9 @@ static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
        struct qed_ptt *p_ptt;
        int rc;
 
+       if (IS_VF(p_hwfn->cdev))
+               return -EINVAL;
+
        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt)
                return -EBUSY;
@@ -981,6 +985,7 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
                if (p_params->pfc.prio[i])
                        pfc_map |= BIT(i);
 
+       *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
        *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
 
        DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc);
@@ -1055,24 +1060,33 @@ qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
 
        for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
                entry = &p_app->app_pri_tbl[i].entry;
+               *entry = 0;
                if (ieee) {
-                       *entry &= ~DCBX_APP_SF_IEEE_MASK;
+                       *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
                        switch (p_params->app_entry[i].sf_ieee) {
                        case QED_DCBX_SF_IEEE_ETHTYPE:
                                *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
                                           DCBX_APP_SF_IEEE_SHIFT);
+                               *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+                                          DCBX_APP_SF_SHIFT);
                                break;
                        case QED_DCBX_SF_IEEE_TCP_PORT:
                                *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
                                           DCBX_APP_SF_IEEE_SHIFT);
+                               *entry |= ((u32)DCBX_APP_SF_PORT <<
+                                          DCBX_APP_SF_SHIFT);
                                break;
                        case QED_DCBX_SF_IEEE_UDP_PORT:
                                *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
                                           DCBX_APP_SF_IEEE_SHIFT);
+                               *entry |= ((u32)DCBX_APP_SF_PORT <<
+                                          DCBX_APP_SF_SHIFT);
                                break;
                        case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
                                *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
                                           DCBX_APP_SF_IEEE_SHIFT);
+                               *entry |= ((u32)DCBX_APP_SF_PORT <<
+                                          DCBX_APP_SF_SHIFT);
                                break;
                        }
                } else {
@@ -1172,7 +1186,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
                return 0;
        }
 
-       dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
+       dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
        if (!dcbx_info)
                return -ENOMEM;
 
@@ -1207,7 +1221,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
 {
        struct qed_dcbx_get *dcbx_info;
 
-       dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
+       dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
        if (!dcbx_info)
                return NULL;
 
index 578bbec..cd23a29 100644 (file)
@@ -2552,7 +2552,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
        edev->ops->register_ops(cdev, &qede_ll_ops, edev);
 
 #ifdef CONFIG_DCB
-       qede_set_dcbnl_ops(edev->ndev);
+       if (!IS_VF(edev))
+               qede_set_dcbnl_ops(edev->ndev);
 #endif
 
        INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
index 56e0a9f..e47d387 100644 (file)
@@ -722,7 +722,6 @@ static int emac_remove(struct platform_device *pdev)
 
        mdiobus_unregister(adpt->mii_bus);
        free_netdev(netdev);
-       dev_set_drvdata(&pdev->dev, NULL);
 
        return 0;
 }
@@ -731,7 +730,6 @@ static struct platform_driver emac_platform_driver = {
        .probe  = emac_probe,
        .remove = emac_remove,
        .driver = {
-               .owner          = THIS_MODULE,
                .name           = "qcom-emac",
                .of_match_table = emac_dt_match,
        },
index cb29ee2..5ef5d72 100644 (file)
@@ -1062,14 +1062,12 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* this should always be supported */
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
-               dev_err(&pdev->dev, "32-bit PCI DMA addresses"
-                               "not supported by the card\n");
+               dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
                goto err_out_disable_dev;
        }
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
-               dev_err(&pdev->dev, "32-bit PCI DMA addresses"
-                               "not supported by the card\n");
+               dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
                goto err_out_disable_dev;
        }
 
index 1f8240a..440ae27 100644 (file)
@@ -201,9 +201,14 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
 
        [ARSTR]         = 0x0000,
        [TSU_CTRST]     = 0x0004,
+       [TSU_FWSLC]     = 0x0038,
        [TSU_VTAG0]     = 0x0058,
        [TSU_ADSBSY]    = 0x0060,
        [TSU_TEN]       = 0x0064,
+       [TSU_POST1]     = 0x0070,
+       [TSU_POST2]     = 0x0074,
+       [TSU_POST3]     = 0x0078,
+       [TSU_POST4]     = 0x007c,
        [TSU_ADRH0]     = 0x0100,
 
        [TXNLCR0]       = 0x0080,
@@ -2781,6 +2786,8 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
 {
        if (sh_eth_is_rz_fast_ether(mdp)) {
                sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
+               sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
+                                TSU_FWSLC);    /* Enable POST registers */
                return;
        }
 
index 1736f4b..f6883b2 100644 (file)
@@ -64,7 +64,7 @@
 #define LM87_ALARM_TEMP_INT            0x10
 #define LM87_ALARM_TEMP_EXT1           0x20
 
-#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
+#if IS_ENABLED(CONFIG_SENSORS_LM87)
 
 static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
 {
@@ -455,7 +455,7 @@ static int sfe4001_init(struct efx_nic *efx)
        struct falcon_board *board = falcon_board(efx);
        int rc;
 
-#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
+#if IS_ENABLED(CONFIG_SENSORS_LM90)
        board->hwmon_client =
                i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info);
 #else
index 95001ee..6f85276 100644 (file)
@@ -1426,7 +1426,7 @@ static void sis900_set_mode(struct sis900_private *sp, int speed, int duplex)
                rx_flags |= RxATX;
        }
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
        /* Can accept Jumbo packet */
        rx_flags |= RxAJAB;
 #endif
@@ -1750,7 +1750,7 @@ static int sis900_rx(struct net_device *net_dev)
                data_size = rx_status & DSIZE;
                rx_size = data_size - CRC_SIZE;
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
                /* ``TOOLONG'' flag means jumbo packet received. */
                if ((rx_status & TOOLONG) && data_size <= MAX_FRAME_SIZE)
                        rx_status &= (~ ((unsigned int)TOOLONG));
index 7d430d3..f0da3dc 100644 (file)
@@ -310,7 +310,7 @@ enum sis630_revision_id {
 #define CRC_SIZE                4
 #define MAC_HEADER_SIZE         14
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
 #define MAX_FRAME_SIZE  (1518 + 4)
 #else
 #define MAX_FRAME_SIZE  1518
index e17671c..ea84654 100644 (file)
@@ -470,7 +470,9 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
 #endif
 
 #if ! SMC_CAN_USE_8BIT
+#undef SMC_inb
 #define SMC_inb(ioaddr, reg)           ({ BUG(); 0; })
+#undef SMC_outb
 #define SMC_outb(x, ioaddr, reg)       BUG()
 #define SMC_insb(a, r, p, l)           BUG()
 #define SMC_outsb(a, r, p, l)          BUG()
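The smc91x hunk above adds #undef before redefining SMC_inb/SMC_outb as BUG() stubs for configurations without 8-bit access; redefining an already-defined macro with a different body otherwise triggers a "macro redefined" warning. A generic sketch of the pattern, with a hypothetical MY_READ macro:

#include <linux/bug.h>
#include <linux/io.h>

#define MY_READ(addr)	readb(addr)

/* later, when this access width turns out to be unusable */
#undef MY_READ
#define MY_READ(addr)	({ BUG(); 0; })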
index 8ab8d4b..e9b8579 100644 (file)
@@ -1108,15 +1108,8 @@ static int smsc911x_mii_init(struct platform_device *pdev,
                goto err_out_free_bus_2;
        }
 
-       if (smsc911x_mii_probe(dev) < 0) {
-               SMSC_WARN(pdata, probe, "Error registering mii bus");
-               goto err_out_unregister_bus_3;
-       }
-
        return 0;
 
-err_out_unregister_bus_3:
-       mdiobus_unregister(pdata->mii_bus);
 err_out_free_bus_2:
        mdiobus_free(pdata->mii_bus);
 err_out_1:
@@ -1523,23 +1516,90 @@ static void smsc911x_disable_irq_chip(struct net_device *dev)
        smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
 }
 
+static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct smsc911x_data *pdata = netdev_priv(dev);
+       u32 intsts = smsc911x_reg_read(pdata, INT_STS);
+       u32 inten = smsc911x_reg_read(pdata, INT_EN);
+       int serviced = IRQ_NONE;
+       u32 temp;
+
+       if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
+               temp = smsc911x_reg_read(pdata, INT_EN);
+               temp &= (~INT_EN_SW_INT_EN_);
+               smsc911x_reg_write(pdata, INT_EN, temp);
+               smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
+               pdata->software_irq_signal = 1;
+               smp_wmb();
+               serviced = IRQ_HANDLED;
+       }
+
+       if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
+               /* Called when there is a multicast update scheduled and
+                * it is now safe to complete the update */
+               SMSC_TRACE(pdata, intr, "RX Stop interrupt");
+               smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
+               if (pdata->multicast_update_pending)
+                       smsc911x_rx_multicast_update_workaround(pdata);
+               serviced = IRQ_HANDLED;
+       }
+
+       if (intsts & inten & INT_STS_TDFA_) {
+               temp = smsc911x_reg_read(pdata, FIFO_INT);
+               temp |= FIFO_INT_TX_AVAIL_LEVEL_;
+               smsc911x_reg_write(pdata, FIFO_INT, temp);
+               smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
+               netif_wake_queue(dev);
+               serviced = IRQ_HANDLED;
+       }
+
+       if (unlikely(intsts & inten & INT_STS_RXE_)) {
+               SMSC_TRACE(pdata, intr, "RX Error interrupt");
+               smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
+               serviced = IRQ_HANDLED;
+       }
+
+       if (likely(intsts & inten & INT_STS_RSFL_)) {
+               if (likely(napi_schedule_prep(&pdata->napi))) {
+                       /* Disable Rx interrupts */
+                       temp = smsc911x_reg_read(pdata, INT_EN);
+                       temp &= (~INT_EN_RSFL_EN_);
+                       smsc911x_reg_write(pdata, INT_EN, temp);
+                       /* Schedule a NAPI poll */
+                       __napi_schedule(&pdata->napi);
+               } else {
+                       SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
+               }
+               serviced = IRQ_HANDLED;
+       }
+
+       return serviced;
+}
+
 static int smsc911x_open(struct net_device *dev)
 {
        struct smsc911x_data *pdata = netdev_priv(dev);
        unsigned int timeout;
        unsigned int temp;
        unsigned int intcfg;
+       int retval;
+       int irq_flags;
 
-       /* if the phy is not yet registered, retry later*/
+       /* find and start the given phy */
        if (!dev->phydev) {
-               SMSC_WARN(pdata, hw, "phy_dev is NULL");
-               return -EAGAIN;
+               retval = smsc911x_mii_probe(dev);
+               if (retval < 0) {
+                       SMSC_WARN(pdata, probe, "Error starting phy");
+                       goto out;
+               }
        }
 
        /* Reset the LAN911x */
-       if (smsc911x_soft_reset(pdata)) {
+       retval = smsc911x_soft_reset(pdata);
+       if (retval) {
                SMSC_WARN(pdata, hw, "soft reset failed");
-               return -EIO;
+               goto mii_free_out;
        }
 
        smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
@@ -1595,6 +1655,15 @@ static int smsc911x_open(struct net_device *dev)
        pdata->software_irq_signal = 0;
        smp_wmb();
 
+       irq_flags = irq_get_trigger_type(dev->irq);
+       retval = request_irq(dev->irq, smsc911x_irqhandler,
+                            irq_flags | IRQF_SHARED, dev->name, dev);
+       if (retval) {
+               SMSC_WARN(pdata, probe,
+                         "Unable to claim requested irq: %d", dev->irq);
+               goto mii_free_out;
+       }
+
        temp = smsc911x_reg_read(pdata, INT_EN);
        temp |= INT_EN_SW_INT_EN_;
        smsc911x_reg_write(pdata, INT_EN, temp);
@@ -1609,7 +1678,8 @@ static int smsc911x_open(struct net_device *dev)
        if (!pdata->software_irq_signal) {
                netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n",
                            dev->irq);
-               return -ENODEV;
+               retval = -ENODEV;
+               goto irq_stop_out;
        }
        SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d",
                   dev->irq);
@@ -1655,6 +1725,14 @@ static int smsc911x_open(struct net_device *dev)
 
        netif_start_queue(dev);
        return 0;
+
+irq_stop_out:
+       free_irq(dev->irq, dev);
+mii_free_out:
+       phy_disconnect(dev->phydev);
+       dev->phydev = NULL;
+out:
+       return retval;
 }
 
 /* Entry point for stopping the interface */
@@ -1676,9 +1754,15 @@ static int smsc911x_stop(struct net_device *dev)
        dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
        smsc911x_tx_update_txcounters(dev);
 
+       free_irq(dev->irq, dev);
+
        /* Bring the PHY down */
-       if (dev->phydev)
+       if (dev->phydev) {
                phy_stop(dev->phydev);
+               phy_disconnect(dev->phydev);
+               dev->phydev = NULL;
+       }
+       netif_carrier_off(dev);
 
        SMSC_TRACE(pdata, ifdown, "Interface stopped");
        return 0;
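The smsc911x rework above moves the IRQ request out of probe and into the open path, releasing it again in stop together with the PHY. A rough sketch of that open/stop pairing, assuming a driver-private structure with a NAPI context; the foo_* names are placeholders, not the smsc911x symbols:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct napi_struct napi;	/* added with netif_napi_add() in probe */
};

static irqreturn_t foo_irqhandler(int irq, void *dev_id)
{
	/* ack/mask device interrupts here; IRQ_NONE lets shared IRQs pass on */
	return IRQ_HANDLED;
}

static int foo_open(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	unsigned long irq_flags = irq_get_trigger_type(dev->irq);
	int ret;

	ret = request_irq(dev->irq, foo_irqhandler,
			  irq_flags | IRQF_SHARED, dev->name, dev);
	if (ret)
		return ret;

	napi_enable(&priv->napi);
	netif_start_queue(dev);
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	free_irq(dev->irq, dev);	/* must mirror the request in open */
	return 0;
}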
@@ -1820,67 +1904,6 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
        spin_unlock_irqrestore(&pdata->mac_lock, flags);
 }
 
-static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
-{
-       struct net_device *dev = dev_id;
-       struct smsc911x_data *pdata = netdev_priv(dev);
-       u32 intsts = smsc911x_reg_read(pdata, INT_STS);
-       u32 inten = smsc911x_reg_read(pdata, INT_EN);
-       int serviced = IRQ_NONE;
-       u32 temp;
-
-       if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
-               temp = smsc911x_reg_read(pdata, INT_EN);
-               temp &= (~INT_EN_SW_INT_EN_);
-               smsc911x_reg_write(pdata, INT_EN, temp);
-               smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
-               pdata->software_irq_signal = 1;
-               smp_wmb();
-               serviced = IRQ_HANDLED;
-       }
-
-       if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
-               /* Called when there is a multicast update scheduled and
-                * it is now safe to complete the update */
-               SMSC_TRACE(pdata, intr, "RX Stop interrupt");
-               smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
-               if (pdata->multicast_update_pending)
-                       smsc911x_rx_multicast_update_workaround(pdata);
-               serviced = IRQ_HANDLED;
-       }
-
-       if (intsts & inten & INT_STS_TDFA_) {
-               temp = smsc911x_reg_read(pdata, FIFO_INT);
-               temp |= FIFO_INT_TX_AVAIL_LEVEL_;
-               smsc911x_reg_write(pdata, FIFO_INT, temp);
-               smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
-               netif_wake_queue(dev);
-               serviced = IRQ_HANDLED;
-       }
-
-       if (unlikely(intsts & inten & INT_STS_RXE_)) {
-               SMSC_TRACE(pdata, intr, "RX Error interrupt");
-               smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
-               serviced = IRQ_HANDLED;
-       }
-
-       if (likely(intsts & inten & INT_STS_RSFL_)) {
-               if (likely(napi_schedule_prep(&pdata->napi))) {
-                       /* Disable Rx interrupts */
-                       temp = smsc911x_reg_read(pdata, INT_EN);
-                       temp &= (~INT_EN_RSFL_EN_);
-                       smsc911x_reg_write(pdata, INT_EN, temp);
-                       /* Schedule a NAPI poll */
-                       __napi_schedule(&pdata->napi);
-               } else {
-                       SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
-               }
-               serviced = IRQ_HANDLED;
-       }
-
-       return serviced;
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void smsc911x_poll_controller(struct net_device *dev)
 {
@@ -2300,16 +2323,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
        pdata = netdev_priv(dev);
        BUG_ON(!pdata);
        BUG_ON(!pdata->ioaddr);
-       BUG_ON(!dev->phydev);
+       WARN_ON(dev->phydev);
 
        SMSC_TRACE(pdata, ifdown, "Stopping driver");
 
-       phy_disconnect(dev->phydev);
        mdiobus_unregister(pdata->mii_bus);
        mdiobus_free(pdata->mii_bus);
 
        unregister_netdev(dev);
-       free_irq(dev->irq, dev);
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                           "smsc911x-memory");
        if (!res)
@@ -2394,8 +2415,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
        struct smsc911x_data *pdata;
        struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
        struct resource *res;
-       unsigned int intcfg = 0;
-       int res_size, irq, irq_flags;
+       int res_size, irq;
        int retval;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -2434,7 +2454,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
 
        pdata = netdev_priv(dev);
        dev->irq = irq;
-       irq_flags = irq_get_trigger_type(irq);
        pdata->ioaddr = ioremap_nocache(res->start, res_size);
 
        pdata->dev = dev;
@@ -2481,43 +2500,23 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
        if (retval < 0)
                goto out_disable_resources;
 
-       /* configure irq polarity and type before connecting isr */
-       if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
-               intcfg |= INT_CFG_IRQ_POL_;
-
-       if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL)
-               intcfg |= INT_CFG_IRQ_TYPE_;
-
-       smsc911x_reg_write(pdata, INT_CFG, intcfg);
-
-       /* Ensure interrupts are globally disabled before connecting ISR */
-       smsc911x_disable_irq_chip(dev);
+       netif_carrier_off(dev);
 
-       retval = request_irq(dev->irq, smsc911x_irqhandler,
-                            irq_flags | IRQF_SHARED, dev->name, dev);
+       retval = smsc911x_mii_init(pdev, dev);
        if (retval) {
-               SMSC_WARN(pdata, probe,
-                         "Unable to claim requested irq: %d", dev->irq);
+               SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
                goto out_disable_resources;
        }
 
-       netif_carrier_off(dev);
-
        retval = register_netdev(dev);
        if (retval) {
                SMSC_WARN(pdata, probe, "Error %i registering device", retval);
-               goto out_free_irq;
+               goto out_disable_resources;
        } else {
                SMSC_TRACE(pdata, probe,
                           "Network interface: \"%s\"", dev->name);
        }
 
-       retval = smsc911x_mii_init(pdev, dev);
-       if (retval) {
-               SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
-               goto out_unregister_netdev_5;
-       }
-
        spin_lock_irq(&pdata->mac_lock);
 
        /* Check if mac address has been specified when bringing interface up */
@@ -2553,10 +2552,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
 
        return 0;
 
-out_unregister_netdev_5:
-       unregister_netdev(dev);
-out_free_irq:
-       free_irq(dev->irq, dev);
 out_disable_resources:
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
index 2533b91..d3292c4 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/stmmac.h>
 #include <linux/phy.h>
 #include <linux/module.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
 #define STMMAC_VLAN_TAG_USED
 #include <linux/if_vlan.h>
 #endif
index 79d8b92..e5a926b 100644 (file)
@@ -169,7 +169,8 @@ static int stm32_dwmac_resume(struct device *dev)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops, stm32_dwmac_suspend, stm32_dwmac_resume);
+static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops,
+       stm32_dwmac_suspend, stm32_dwmac_resume);
 
 static const struct of_device_id stm32_dwmac_match[] = {
        { .compatible = "st,stm32-dwmac"},
index c25d971..0d00531 100644 (file)
@@ -1246,7 +1246,7 @@ static int dwceqos_mii_init(struct net_local *lp)
        lp->mii_bus->read  = &dwceqos_mdio_read;
        lp->mii_bus->write = &dwceqos_mdio_write;
        lp->mii_bus->priv = lp;
-       lp->mii_bus->parent = &lp->ndev->dev;
+       lp->mii_bus->parent = &lp->pdev->dev;
 
        of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
        snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
@@ -2761,7 +2761,7 @@ static const struct ethtool_ops dwceqos_ethtool_ops = {
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
-static struct net_device_ops netdev_ops = {
+static const struct net_device_ops netdev_ops = {
        .ndo_open               = dwceqos_open,
        .ndo_stop               = dwceqos_stop,
        .ndo_start_xmit         = dwceqos_start_xmit,
@@ -2853,25 +2853,17 @@ static int dwceqos_probe(struct platform_device *pdev)
 
        ndev->features = ndev->hw_features;
 
-       netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
-
-       ret = register_netdev(ndev);
-       if (ret) {
-               dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
-               goto err_out_clk_dis_aper;
-       }
-
        lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
        if (IS_ERR(lp->phy_ref_clk)) {
                dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
                ret = PTR_ERR(lp->phy_ref_clk);
-               goto err_out_unregister_netdev;
+               goto err_out_clk_dis_aper;
        }
 
        ret = clk_prepare_enable(lp->phy_ref_clk);
        if (ret) {
                dev_err(&pdev->dev, "Unable to enable device clock.\n");
-               goto err_out_unregister_netdev;
+               goto err_out_clk_dis_aper;
        }
 
        lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
@@ -2880,7 +2872,7 @@ static int dwceqos_probe(struct platform_device *pdev)
                ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
                if (ret < 0) {
                        dev_err(&pdev->dev, "invalid fixed-link");
-                       goto err_out_unregister_clk_notifier;
+                       goto err_out_clk_dis_phy;
                }
 
                lp->phy_node = of_node_get(lp->pdev->dev.of_node);
@@ -2889,7 +2881,7 @@ static int dwceqos_probe(struct platform_device *pdev)
        ret = of_get_phy_mode(lp->pdev->dev.of_node);
        if (ret < 0) {
                dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
-               goto err_out_unregister_clk_notifier;
+               goto err_out_clk_dis_phy;
        }
 
        lp->phy_interface = ret;
@@ -2897,14 +2889,14 @@ static int dwceqos_probe(struct platform_device *pdev)
        ret = dwceqos_mii_init(lp);
        if (ret) {
                dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
-               goto err_out_unregister_clk_notifier;
+               goto err_out_clk_dis_phy;
        }
 
        ret = dwceqos_mii_probe(ndev);
        if (ret != 0) {
                netdev_err(ndev, "mii_probe fail.\n");
                ret = -ENXIO;
-               goto err_out_unregister_clk_notifier;
+               goto err_out_clk_dis_phy;
        }
 
        dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
@@ -2922,7 +2914,7 @@ static int dwceqos_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
                        ret);
-               goto err_out_unregister_clk_notifier;
+               goto err_out_clk_dis_phy;
        }
        dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
                 pdev->id, ndev->base_addr, ndev->irq);
@@ -2932,18 +2924,24 @@ static int dwceqos_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
                        ndev->irq, ret);
-               goto err_out_unregister_clk_notifier;
+               goto err_out_clk_dis_phy;
        }
 
        if (netif_msg_probe(lp))
                netdev_dbg(ndev, "net_local@%p\n", lp);
 
+       netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
+
+       ret = register_netdev(ndev);
+       if (ret) {
+               dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+               goto err_out_clk_dis_phy;
+       }
+
        return 0;
 
-err_out_unregister_clk_notifier:
+err_out_clk_dis_phy:
        clk_disable_unprepare(lp->phy_ref_clk);
-err_out_unregister_netdev:
-       unregister_netdev(ndev);
 err_out_clk_dis_aper:
        clk_disable_unprepare(lp->apb_pclk);
 err_out_free_netdev:
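The dwceqos reordering above defers netif_napi_add()/register_netdev() until the clocks, PHY, MDIO bus and IRQ are all set up, so the stack can never call ndo_open() against a half-initialised device, and the error path no longer needs an unregister_netdev() label. A rough sketch of the resulting probe shape; the foo_* helpers are placeholders:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

struct foo_priv {
	int placeholder;	/* driver state would live here */
};

/* placeholders for the clock/PHY/MDIO/IRQ bring-up and teardown */
static int foo_init_resources(struct platform_device *pdev,
			      struct net_device *ndev);
static void foo_release_resources(struct net_device *ndev);

static int foo_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	int ret;

	ndev = alloc_etherdev(sizeof(struct foo_priv));
	if (!ndev)
		return -ENOMEM;
	platform_set_drvdata(pdev, ndev);

	ret = foo_init_resources(pdev, ndev);
	if (ret)
		goto err_free;

	/* register last: from here on ndo_open() may run at any time */
	ret = register_netdev(ndev);
	if (ret)
		goto err_release;

	return 0;

err_release:
	foo_release_resources(ndev);
err_free:
	free_netdev(ndev);
	return ret;
}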
index 79f0ec4..bc258d7 100644 (file)
@@ -1791,7 +1791,7 @@ fail_alloc_rx:
        gelic_card_free_chain(card, card->tx_chain.head);
 fail_alloc_tx:
        free_irq(card->irq, card);
-       netdev->irq = NO_IRQ;
+       netdev->irq = 0;
 fail_request_irq:
        ps3_sb_event_receive_port_destroy(dev, card->irq);
 fail_alloc_irq:
@@ -1843,7 +1843,7 @@ static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
        netdev0 = card->netdev[GELIC_PORT_ETHERNET_0];
        /* disconnect event port */
        free_irq(card->irq, card);
-       netdev0->irq = NO_IRQ;
+       netdev0->irq = 0;
        ps3_sb_event_receive_port_destroy(card->dev, card->irq);
 
        wait_event(card->waitq,
index d95a50a..622ab3a 100644 (file)
@@ -484,7 +484,7 @@ static void bpq_setup(struct net_device *dev)
        dev->flags      = 0;
        dev->features   = NETIF_F_LLTX; /* Allow recursion */
 
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
        dev->header_ops      = &ax25_header_ops;
 #endif
 
index 87b566f..5078a0d 100644 (file)
@@ -320,6 +320,13 @@ config XILINX_GMII2RGMII
          the Reduced Gigabit Media Independent Interface (RGMII) between
          Ethernet physical media devices and the Gigabit Ethernet controller.
 
+config MDIO_XGENE
+       tristate "APM X-Gene SoC MDIO bus controller"
+       depends on ARCH_XGENE || COMPILE_TEST
+       help
+         This module provides a driver for the MDIO busses found in the
+         APM X-Gene SoCs.
+
 endif # PHYLIB
 
 config MICREL_KS8995MA
index 55674b0..85c271c 100644 (file)
@@ -37,9 +37,6 @@
 #include <net/l3mdev.h>
 #include <net/fib_rules.h>
 
-#define RT_FL_TOS(oldflp4) \
-       ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
-
 #define DRV_NAME       "vrf"
 #define DRV_VERSION    "1.0"
 
index 4bfeb97..e7d1668 100644 (file)
@@ -2780,14 +2780,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
        struct net_device *lowerdev = NULL;
 
        if (conf->flags & VXLAN_F_GPE) {
-               if (conf->flags & ~VXLAN_F_ALLOWED_GPE)
-                       return -EINVAL;
                /* For now, allow GPE only together with COLLECT_METADATA.
                 * This can be relaxed later; in such case, the other side
                 * of the PtP link will have to be provided.
                 */
-               if (!(conf->flags & VXLAN_F_COLLECT_METADATA))
+               if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
+                   !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
+                       pr_info("unsupported combination of extensions\n");
                        return -EINVAL;
+               }
 
                vxlan_raw_setup(dev);
        } else {
@@ -2840,6 +2841,9 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
                        dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 
                needed_headroom = lowerdev->hard_header_len;
+       } else if (vxlan_addr_multicast(&dst->remote_ip)) {
+               pr_info("multicast destination requires interface to be specified\n");
+               return -EINVAL;
        }
 
        if (conf->mtu) {
@@ -2872,8 +2876,10 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
                     tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
                    tmp->cfg.dst_port == vxlan->cfg.dst_port &&
                    (tmp->flags & VXLAN_F_RCV_FLAGS) ==
-                   (vxlan->flags & VXLAN_F_RCV_FLAGS))
-               return -EEXIST;
+                   (vxlan->flags & VXLAN_F_RCV_FLAGS)) {
+                       pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni));
+                       return -EEXIST;
+               }
        }
 
        dev->ethtool_ops = &vxlan_ethtool_ops;
@@ -2907,7 +2913,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[])
 {
        struct vxlan_config conf;
-       int err;
 
        memset(&conf, 0, sizeof(conf));
 
@@ -3016,26 +3021,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
        if (tb[IFLA_MTU])
                conf.mtu = nla_get_u32(tb[IFLA_MTU]);
 
-       err = vxlan_dev_configure(src_net, dev, &conf);
-       switch (err) {
-       case -ENODEV:
-               pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
-               break;
-
-       case -EPERM:
-               pr_info("IPv6 is disabled via sysctl\n");
-               break;
-
-       case -EEXIST:
-               pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni));
-               break;
-
-       case -EINVAL:
-               pr_info("unsupported combination of extensions\n");
-               break;
-       }
-
-       return err;
+       return vxlan_dev_configure(src_net, dev, &conf);
 }
 
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
index acec16b..b99ad5d 100644 (file)
@@ -462,13 +462,13 @@ static void ath10k_ahb_halt_chip(struct ath10k *ar)
 static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg)
 {
        struct ath10k *ar = arg;
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
        if (!ath10k_pci_irq_pending(ar))
                return IRQ_NONE;
 
        ath10k_pci_disable_and_clear_legacy_irq(ar);
-       tasklet_schedule(&ar_pci->intr_tq);
+       ath10k_pci_irq_msi_fw_mask(ar);
+       napi_schedule(&ar->napi);
 
        return IRQ_HANDLED;
 }
@@ -577,7 +577,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq);
 
-       ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%p mem_len: %lu gcc mem: 0x%p tcsr_mem: 0x%p\n",
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%pK mem_len: %lu gcc mem: 0x%pK tcsr_mem: 0x%pK\n",
                   ar_ahb->mem, ar_ahb->mem_len,
                   ar_ahb->gcc_mem, ar_ahb->tcsr_mem);
        return 0;
@@ -717,6 +717,9 @@ static void ath10k_ahb_hif_stop(struct ath10k *ar)
        synchronize_irq(ar_ahb->irq);
 
        ath10k_pci_flush(ar);
+
+       napi_synchronize(&ar->napi);
+       napi_disable(&ar->napi);
 }
 
 static int ath10k_ahb_hif_power_up(struct ath10k *ar)
@@ -748,6 +751,7 @@ static int ath10k_ahb_hif_power_up(struct ath10k *ar)
                ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
                goto err_ce_deinit;
        }
+       napi_enable(&ar->napi);
 
        return 0;
 
@@ -831,7 +835,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
                goto err_resource_deinit;
        }
 
-       ath10k_pci_init_irq_tasklets(ar);
+       ath10k_pci_init_napi(ar);
 
        ret = ath10k_ahb_request_irq_legacy(ar);
        if (ret)
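The ahb.c hunks above are part of the ath10k tasklet-to-NAPI conversion: the hard IRQ handler now only masks further interrupts and schedules NAPI, deferring all completion processing to the poll callback. A generic sketch of the IRQ side; foo_* names are illustrative and the two helpers are placeholders:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct foo_dev {
	struct napi_struct napi;
};

static bool foo_irq_pending(struct foo_dev *fd);	/* placeholder */
static void foo_mask_irqs(struct foo_dev *fd);		/* placeholder */

static irqreturn_t foo_isr(int irq, void *arg)
{
	struct foo_dev *fd = arg;

	if (!foo_irq_pending(fd))
		return IRQ_NONE;	/* not ours (shared line) */

	foo_mask_irqs(fd);		/* quiesce the source until poll runs */
	napi_schedule(&fd->napi);	/* run the poll callback in softirq */

	return IRQ_HANDLED;
}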
index 3d29b08..2872d34 100644 (file)
@@ -221,7 +221,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
        u32 txlen;
        int ret;
 
-       ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+       ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
                   buffer, length);
 
        if (ar->bmi.done_sent) {
@@ -287,7 +287,7 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
        int ret;
 
        ath10k_dbg(ar, ATH10K_DBG_BMI,
-                  "bmi fast download address 0x%x buffer 0x%p length %d\n",
+                  "bmi fast download address 0x%x buffer 0x%pK length %d\n",
                   address, buffer, length);
 
        ret = ath10k_bmi_lz_stream_start(ar, address);
index 9fb8d74..65d8d71 100644 (file)
@@ -840,7 +840,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
        ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT,
-                  "boot init ce src ring id %d entries %d base_addr %p\n",
+                  "boot init ce src ring id %d entries %d base_addr %pK\n",
                   ce_id, nentries, src_ring->base_addr_owner_space);
 
        return 0;
@@ -874,7 +874,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
        ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT,
-                  "boot ce dest ring id %d entries %d base_addr %p\n",
+                  "boot ce dest ring id %d entries %d base_addr %pK\n",
                   ce_id, nentries, dest_ring->base_addr_owner_space);
 
        return 0;
index e889829..3a8984b 100644 (file)
@@ -60,7 +60,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .otp_exe_param = 0,
                .channel_counters_freq_hz = 88000,
                .max_probe_resp_desc_thres = 0,
-               .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
                .cal_data_len = 2116,
                .fw = {
                        .dir = QCA988X_HW_2_0_FW_DIR,
@@ -68,6 +67,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                        .board_size = QCA988X_BOARD_DATA_SZ,
                        .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
                },
+               .hw_ops = &qca988x_ops,
        },
        {
                .id = QCA9887_HW_1_0_VERSION,
@@ -79,7 +79,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .otp_exe_param = 0,
                .channel_counters_freq_hz = 88000,
                .max_probe_resp_desc_thres = 0,
-               .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
                .cal_data_len = 2116,
                .fw = {
                        .dir = QCA9887_HW_1_0_FW_DIR,
@@ -87,6 +86,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                        .board_size = QCA9887_BOARD_DATA_SZ,
                        .board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
                },
+               .hw_ops = &qca988x_ops,
        },
        {
                .id = QCA6174_HW_2_1_VERSION,
@@ -104,6 +104,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                        .board_size = QCA6174_BOARD_DATA_SZ,
                        .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
                },
+               .hw_ops = &qca988x_ops,
        },
        {
                .id = QCA6174_HW_2_1_VERSION,
@@ -114,7 +115,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .otp_exe_param = 0,
                .channel_counters_freq_hz = 88000,
                .max_probe_resp_desc_thres = 0,
-               .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
                .cal_data_len = 8124,
                .fw = {
                        .dir = QCA6174_HW_2_1_FW_DIR,
@@ -122,6 +122,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                        .board_size = QCA6174_BOARD_DATA_SZ,
                        .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
                },
+               .hw_ops = &qca988x_ops,
        },
        {
                .id = QCA6174_HW_3_0_VERSION,
@@ -132,7 +133,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .otp_exe_param = 0,
                .channel_counters_freq_hz = 88000,
                .max_probe_resp_desc_thres = 0,
-               .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
                .cal_data_len = 8124,
                .fw = {
                        .dir = QCA6174_HW_3_0_FW_DIR,
@@ -140,6 +140,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                        .board_size = QCA6174_BOARD_DATA_SZ,
                        .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
                },
+               .hw_ops = &qca988x_ops,
        },
        {
                .id = QCA6174_HW_3_2_VERSION,
@@ -150,7 +151,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .otp_exe_param = 0,
                .channel_counters_freq_hz = 88000,
                .max_probe_resp_desc_thres = 0,
-               .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
                .cal_data_len = 8124,
                .fw = {
                        /* uses same binaries as hw3.0 */
@@ -159,6 +159,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                        .board_size = QCA6174_BOARD_DATA_SZ,
                        .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
                },
+               .hw_ops = &qca988x_ops,
        },
        {
                .id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -171,7 +172,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .cck_rate_map_rev2 = true,
                .channel_counters_freq_hz = 150000,
                .max_probe_resp_desc_thres = 24,
-               .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
                .tx_chain_mask = 0xf,
                .rx_chain_mask = 0xf,
                .max_spatial_stream = 4,
@@ -182,6 +182,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                        .board_size = QCA99X0_BOARD_DATA_SZ,
                        .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
                },
+               .sw_decrypt_mcast_mgmt = true,
+               .hw_ops = &qca99x0_ops,
        },
        {
                .id = QCA9984_HW_1_0_DEV_VERSION,
@@ -194,7 +196,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .cck_rate_map_rev2 = true,
                .channel_counters_freq_hz = 150000,
                .max_probe_resp_desc_thres = 24,
-               .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
                .tx_chain_mask = 0xf,
                .rx_chain_mask = 0xf,
                .max_spatial_stream = 4,
@@ -205,6 +206,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                        .board_size = QCA99X0_BOARD_DATA_SZ,
                        .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
                },
+               .sw_decrypt_mcast_mgmt = true,
+               .hw_ops = &qca99x0_ops,
        },
        {
                .id = QCA9888_HW_2_0_DEV_VERSION,
@@ -216,7 +219,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .continuous_frag_desc = true,
                .channel_counters_freq_hz = 150000,
                .max_probe_resp_desc_thres = 24,
-               .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
                .tx_chain_mask = 3,
                .rx_chain_mask = 3,
                .max_spatial_stream = 2,
@@ -227,6 +229,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                        .board_size = QCA99X0_BOARD_DATA_SZ,
                        .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
                },
+               .sw_decrypt_mcast_mgmt = true,
+               .hw_ops = &qca99x0_ops,
        },
        {
                .id = QCA9377_HW_1_0_DEV_VERSION,
@@ -244,6 +248,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                        .board_size = QCA9377_BOARD_DATA_SZ,
                        .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
                },
+               .hw_ops = &qca988x_ops,
        },
        {
                .id = QCA9377_HW_1_1_DEV_VERSION,
@@ -261,6 +266,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                        .board_size = QCA9377_BOARD_DATA_SZ,
                        .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
                },
+               .hw_ops = &qca988x_ops,
        },
        {
                .id = QCA4019_HW_1_0_DEV_VERSION,
@@ -274,7 +280,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .cck_rate_map_rev2 = true,
                .channel_counters_freq_hz = 125000,
                .max_probe_resp_desc_thres = 24,
-               .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
                .tx_chain_mask = 0x3,
                .rx_chain_mask = 0x3,
                .max_spatial_stream = 2,
@@ -285,6 +290,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                        .board_size = QCA4019_BOARD_DATA_SZ,
                        .board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
                },
+               .sw_decrypt_mcast_mgmt = true,
+               .hw_ops = &qca99x0_ops,
        },
 };
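The hw_params entries above gain a per-chip .hw_ops pointer (qca988x_ops vs qca99x0_ops) and a sw_decrypt_mcast_mgmt flag, replacing the removed hw_4addr_pad enum: chip quirks such as the rx-descriptor L3 padding become optional callbacks looked up through the ops table (see the ath10k_rx_desc_get_l3_pad_bytes() calls further down). The real definitions live in ath10k's hw.h; the sketch below only illustrates the optional-callback dispatch pattern, with made-up names:

struct foo_hw_ops {
	/* returns the number of L3 padding bytes in this rx descriptor */
	int (*rx_pad_bytes)(const void *rxd);
};

struct foo_hw_params {
	const struct foo_hw_ops *hw_ops;
};

static inline int foo_rx_pad_bytes(const struct foo_hw_params *hw,
				   const void *rxd)
{
	if (hw->hw_ops && hw->hw_ops->rx_pad_bytes)
		return hw->hw_ops->rx_pad_bytes(rxd);
	return 0;	/* chips without the quirk add no padding */
}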
 
@@ -304,6 +311,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
        [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp",
        [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl",
        [ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param",
+       [ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
 };
 
 static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -699,7 +707,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
 
        if (!ar->running_fw->fw_file.otp_data ||
            !ar->running_fw->fw_file.otp_len) {
-               ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
+               ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n",
                            ar->running_fw->fw_file.otp_data,
                            ar->running_fw->fw_file.otp_len);
                return 0;
@@ -745,7 +753,7 @@ static int ath10k_download_fw(struct ath10k *ar)
        data = ar->running_fw->fw_file.firmware_data;
        data_len = ar->running_fw->fw_file.firmware_len;
 
-       ret = ath10k_swap_code_seg_configure(ar);
+       ret = ath10k_swap_code_seg_configure(ar, &ar->running_fw->fw_file);
        if (ret) {
                ath10k_err(ar, "failed to configure fw code swap: %d\n",
                           ret);
@@ -753,7 +761,7 @@ static int ath10k_download_fw(struct ath10k *ar)
        }
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT,
-                  "boot uploading firmware image %p len %d\n",
+                  "boot uploading firmware image %pK len %d\n",
                   data, data_len);
 
        ret = ath10k_bmi_fast_download(ar, address, data, data_len);
@@ -787,7 +795,7 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
        if (!IS_ERR(ar->pre_cal_file))
                release_firmware(ar->pre_cal_file);
 
-       ath10k_swap_code_seg_release(ar);
+       ath10k_swap_code_seg_release(ar, &ar->normal_mode_fw.fw_file);
 
        ar->normal_mode_fw.fw_file.otp_data = NULL;
        ar->normal_mode_fw.fw_file.otp_len = 0;
@@ -1497,14 +1505,14 @@ static void ath10k_core_restart(struct work_struct *work)
 
        ieee80211_stop_queues(ar->hw);
        ath10k_drain_tx(ar);
-       complete_all(&ar->scan.started);
-       complete_all(&ar->scan.completed);
-       complete_all(&ar->scan.on_channel);
-       complete_all(&ar->offchan_tx_completed);
-       complete_all(&ar->install_key_done);
-       complete_all(&ar->vdev_setup_done);
-       complete_all(&ar->thermal.wmi_sync);
-       complete_all(&ar->bss_survey_done);
+       complete(&ar->scan.started);
+       complete(&ar->scan.completed);
+       complete(&ar->scan.on_channel);
+       complete(&ar->offchan_tx_completed);
+       complete(&ar->install_key_done);
+       complete(&ar->vdev_setup_done);
+       complete(&ar->thermal.wmi_sync);
+       complete(&ar->bss_survey_done);
        wake_up(&ar->htt.empty_tx_wq);
        wake_up(&ar->wmi.tx_credits_wq);
        wake_up(&ar->peer_mapping_wq);
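The restart path above switches from complete_all() to complete(). complete() releases a single waiter and leaves the completion reusable; complete_all() marks it done for good, so every later wait_for_completion() returns immediately until someone calls reinit_completion(), which is easy to miss on a restart path that can run more than once. A tiny sketch of the reusable form; foo_* names are illustrative:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(foo_done);

static void foo_event(void)
{
	complete(&foo_done);		/* wakes exactly one waiter */
}

static int foo_wait(void)
{
	/* blocks until the next complete(); after a complete_all() this
	 * would return immediately until reinit_completion(&foo_done)
	 */
	if (!wait_for_completion_timeout(&foo_done, HZ))
		return -ETIMEDOUT;
	return 0;
}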
@@ -1705,6 +1713,55 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
        return 0;
 }
 
+static int ath10k_core_reset_rx_filter(struct ath10k *ar)
+{
+       int ret;
+       int vdev_id;
+       int vdev_type;
+       int vdev_subtype;
+       const u8 *vdev_addr;
+
+       vdev_id = 0;
+       vdev_type = WMI_VDEV_TYPE_STA;
+       vdev_subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
+       vdev_addr = ar->mac_addr;
+
+       ret = ath10k_wmi_vdev_create(ar, vdev_id, vdev_type, vdev_subtype,
+                                    vdev_addr);
+       if (ret) {
+               ath10k_err(ar, "failed to create dummy vdev: %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_wmi_vdev_delete(ar, vdev_id);
+       if (ret) {
+               ath10k_err(ar, "failed to delete dummy vdev: %d\n", ret);
+               return ret;
+       }
+
+       /* WMI and HTT may use separate HIF pipes and are not guaranteed to be
+        * serialized properly implicitly.
+        *
+        * Moreover, (most) WMI commands have no explicit acknowledgements. It
+        * is possible to infer completion implicitly by poking the firmware
+        * with an echo command - getting a reply means all preceding commands
+        * have been (mostly) processed.
+        *
+        * In case of vdev create/delete this is sufficient.
+        *
+        * Without this it is possible to end up with a race when the HTT Rx
+        * ring is started before the vdev create/delete hack completes,
+        * allowing a short window of opportunity to receive (and Tx ACK) a
+        * bunch of frames.
+        */
+       ret = ath10k_wmi_barrier(ar);
+       if (ret) {
+               ath10k_err(ar, "failed to ping firmware: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
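ath10k_core_reset_rx_filter() above relies on the new ath10k_wmi_barrier() (note the struct completion barrier member added to struct ath10k_wmi later in this series) to make sure the dummy vdev create/delete has actually been processed before HTT rx starts. The general echo-as-barrier shape is: re-arm a completion, send an echo/no-op command, and wait for its reply with a timeout. A rough sketch under those assumptions, not the actual ath10k implementation; foo_* names are placeholders:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct foo_wmi {
	struct completion barrier;	/* init_completion() at setup time */
};

static int foo_send_echo(struct foo_wmi *wmi);	/* placeholder */

static int foo_barrier(struct foo_wmi *wmi)
{
	reinit_completion(&wmi->barrier);

	if (foo_send_echo(wmi))		/* any command with a reply will do */
		return -EIO;

	if (!wait_for_completion_timeout(&wmi->barrier, 3 * HZ))
		return -ETIMEDOUT;

	return 0;
}

/* called from the WMI event handler when the echo reply arrives */
static void foo_echo_reply(struct foo_wmi *wmi)
{
	complete(&wmi->barrier);
}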
 int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
                      const struct ath10k_fw_components *fw)
 {
@@ -1872,6 +1929,25 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
                goto err_hif_stop;
        }
 
+       /* Some firmware revisions do not properly set up hardware rx filter
+        * registers.
+        *
+        * A known example from QCA9880 and 10.2.4 is that MAC_PCU_ADDR1_MASK
+        * is filled with 0s instead of 1s allowing HW to respond with ACKs to
+        * any frame that matches MAC_PCU_RX_FILTER, which is also
+        * misconfigured to accept anything.
+        *
+        * The ADDR1 is programmed using internal firmware structure field and
+        * can't be (easily/sanely) reached from the driver explicitly. It is
+        * possible to implicitly make it correct by creating a dummy vdev and
+        * then deleting it.
+        */
+       status = ath10k_core_reset_rx_filter(ar);
+       if (status) {
+               ath10k_err(ar, "failed to reset rx filter: %d\n", status);
+               goto err_hif_stop;
+       }
+
        /* If firmware indicates Full Rx Reorder support it must be used in a
         * slightly different manner. Let HTT code know.
         */
@@ -2031,7 +2107,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
                goto err_free_firmware_files;
        }
 
-       ret = ath10k_swap_code_seg_init(ar);
+       ret = ath10k_swap_code_seg_init(ar, &ar->normal_mode_fw.fw_file);
        if (ret) {
                ath10k_err(ar, "failed to initialize code swap segment: %d\n",
                           ret);
@@ -2072,6 +2148,9 @@ static void ath10k_core_register_work(struct work_struct *work)
        struct ath10k *ar = container_of(work, struct ath10k, register_work);
        int status;
 
+       /* peer stats are enabled by default */
+       set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+
        status = ath10k_core_probe_fw(ar);
        if (status) {
                ath10k_err(ar, "could not probe fw (%d)\n", status);
@@ -2249,6 +2328,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
        INIT_WORK(&ar->register_work, ath10k_core_register_work);
        INIT_WORK(&ar->restart_work, ath10k_core_restart);
 
+       init_dummy_netdev(&ar->napi_dev);
+
        ret = ath10k_debug_create(ar);
        if (ret)
                goto err_free_aux_wq;
index 30ae5bf..6ec9495 100644 (file)
 #define ATH10K_KEEPALIVE_MAX_IDLE 3895
 #define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
 
+/* NAPI poll budget */
+#define ATH10K_NAPI_BUDGET      64
+#define ATH10K_NAPI_QUOTA_LIMIT 60
+
 struct ath10k;
 
 enum ath10k_bus {
@@ -142,6 +146,7 @@ struct ath10k_wmi {
        enum ath10k_htc_ep_id eid;
        struct completion service_ready;
        struct completion unified_ready;
+       struct completion barrier;
        wait_queue_head_t tx_credits_wq;
        DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX);
        struct wmi_cmd_map *cmd;
@@ -440,7 +445,7 @@ struct ath10k_debug {
        struct completion tpc_complete;
 
        /* protected by conf_mutex */
-       u32 fw_dbglog_mask;
+       u64 fw_dbglog_mask;
        u32 fw_dbglog_level;
        u32 pktlog_filter;
        u32 reg_addr;
@@ -551,6 +556,13 @@ enum ath10k_fw_features {
         */
        ATH10K_FW_FEATURE_BTCOEX_PARAM = 14,
 
+       /* Older firmware with HTT delivers incorrect tx status for null func
+        * frames to driver, but this is fixed in 10.2 and 10.4 firmware versions.
+        * Also this workaround results in reporting of incorrect null func
+        * status for 10.4. This flag is used to skip the workaround.
+        */
+       ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR = 15,
+
        /* keep last */
        ATH10K_FW_FEATURE_COUNT,
 };
@@ -663,6 +675,15 @@ struct ath10k_fw_file {
 
        const void *codeswap_data;
        size_t codeswap_len;
+
+       /* The original idea of struct ath10k_fw_file was that it only
+        * contains struct firmware and pointers to various parts (actual
+        * firmware binary, otp, metadata etc) of the file. This seg_info
+        * is actually created separately, but as it is used similarly to
+        * the other firmware components it's more convenient to have it
+        * here.
+        */
+       struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
 };
 
 struct ath10k_fw_components {
@@ -715,53 +736,7 @@ struct ath10k {
        struct ath10k_htc htc;
        struct ath10k_htt htt;
 
-       struct ath10k_hw_params {
-               u32 id;
-               u16 dev_id;
-               const char *name;
-               u32 patch_load_addr;
-               int uart_pin;
-               u32 otp_exe_param;
-
-               /* Type of hw cycle counter wraparound logic, for more info
-                * refer enum ath10k_hw_cc_wraparound_type.
-                */
-               enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
-
-               /* Some of chip expects fragment descriptor to be continuous
-                * memory for any TX operation. Set continuous_frag_desc flag
-                * for the hardware which have such requirement.
-                */
-               bool continuous_frag_desc;
-
-               /* CCK hardware rate table mapping for the newer chipsets
-                * like QCA99X0, QCA4019 got revised. The CCK h/w rate values
-                * are in a proper order with respect to the rate/preamble
-                */
-               bool cck_rate_map_rev2;
-
-               u32 channel_counters_freq_hz;
-
-               /* Mgmt tx descriptors threshold for limiting probe response
-                * frames.
-                */
-               u32 max_probe_resp_desc_thres;
-
-               /* The padding bytes's location is different on various chips */
-               enum ath10k_hw_4addr_pad hw_4addr_pad;
-
-               u32 tx_chain_mask;
-               u32 rx_chain_mask;
-               u32 max_spatial_stream;
-               u32 cal_data_len;
-
-               struct ath10k_hw_params_fw {
-                       const char *dir;
-                       const char *board;
-                       size_t board_size;
-                       size_t board_ext_size;
-               } fw;
-       } hw_params;
+       struct ath10k_hw_params hw_params;
 
        /* contains the firmware images used with ATH10K_FIRMWARE_MODE_NORMAL */
        struct ath10k_fw_components normal_mode_fw;
@@ -774,10 +749,6 @@ struct ath10k {
        const struct firmware *pre_cal_file;
        const struct firmware *cal_file;
 
-       struct {
-               struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
-       } swap;
-
        struct {
                u32 vendor;
                u32 device;
@@ -936,6 +907,10 @@ struct ath10k {
        struct ath10k_thermal thermal;
        struct ath10k_wow wow;
 
+       /* NAPI */
+       struct net_device napi_dev;
+       struct napi_struct napi;
+
        /* must be last */
        u8 drv_priv[0] __aligned(sizeof(void *));
 };
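core.h above adds the NAPI context (napi_dev/napi) together with ATH10K_NAPI_BUDGET and ATH10K_NAPI_QUOTA_LIMIT. The poll callback that pairs with the IRQ-side sketch shown earlier processes at most budget packets per pass and only completes NAPI (and re-enables interrupts) when it ran out of work. A generic sketch with illustrative foo_* names; the two helpers are placeholders:

#include <linux/netdevice.h>

struct foo_dev {
	struct net_device napi_dev;	/* dummy netdev to hang NAPI off */
	struct napi_struct napi;
};

static int foo_process_rx(struct foo_dev *fd, int budget);	/* placeholder */
static void foo_unmask_irqs(struct foo_dev *fd);		/* placeholder */

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_dev *fd = container_of(napi, struct foo_dev, napi);
	int done = foo_process_rx(fd, budget);	/* never more than budget */

	if (done < budget) {
		napi_complete(napi);
		foo_unmask_irqs(fd);
	}

	/* returning budget keeps the poll scheduled for another pass */
	return done;
}

Registration happens once at setup time, e.g. init_dummy_netdev(&fd->napi_dev) followed by netif_napi_add(&fd->napi_dev, &fd->napi, foo_poll, 64), mirroring the init_dummy_netdev() call added to ath10k_core_create() above.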
index 8f0fd41..832da6e 100644 (file)
@@ -1228,9 +1228,9 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file,
 {
        struct ath10k *ar = file->private_data;
        unsigned int len;
-       char buf[64];
+       char buf[96];
 
-       len = scnprintf(buf, sizeof(buf), "0x%08x %u\n",
+       len = scnprintf(buf, sizeof(buf), "0x%16llx %u\n",
                        ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
 
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
@@ -1242,15 +1242,16 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file,
 {
        struct ath10k *ar = file->private_data;
        int ret;
-       char buf[64];
-       unsigned int log_level, mask;
+       char buf[96];
+       unsigned int log_level;
+       u64 mask;
 
        simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
 
        /* make sure that buf is null terminated */
        buf[sizeof(buf) - 1] = 0;
 
-       ret = sscanf(buf, "%x %u", &mask, &log_level);
+       ret = sscanf(buf, "%llx %u", &mask, &log_level);
 
        if (!ret)
                return -EINVAL;
index 5b3c6bc..175aae3 100644 (file)
@@ -44,7 +44,7 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
        skb_cb = ATH10K_SKB_CB(skb);
        memset(skb_cb, 0, sizeof(*skb_cb));
 
-       ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
+       ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
        return skb;
 }
 
@@ -62,7 +62,7 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
 {
        struct ath10k *ar = ep->htc->ar;
 
-       ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
+       ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
                   ep->eid, skb);
 
        ath10k_htc_restore_tx_skb(ep->htc, skb);
@@ -404,7 +404,7 @@ void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
                goto out;
        }
 
-       ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
+       ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
                   eid, skb);
        ep->ep_ops.ep_rx_complete(ar, skb);
 
index 430a83e..98c1424 100644 (file)
@@ -1665,7 +1665,6 @@ struct ath10k_htt {
 
        /* This is used to group tx/rx completions separately and process them
         * in batches to reduce cache stalls */
-       struct tasklet_struct txrx_compl_task;
        struct sk_buff_head rx_compl_q;
        struct sk_buff_head rx_in_ord_compl_q;
        struct sk_buff_head tx_fetch_ind_q;
@@ -1798,5 +1797,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt,
                  struct sk_buff *msdu);
 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
                                             struct sk_buff *skb);
+int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
 
 #endif
index 78db5d6..a3785a9 100644 (file)
@@ -34,7 +34,6 @@
 #define HTT_RX_RING_REFILL_RESCHED_MS 5
 
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
-static void ath10k_htt_txrx_compl_task(unsigned long ptr);
 
 static struct sk_buff *
 ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
@@ -226,7 +225,6 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
 void ath10k_htt_rx_free(struct ath10k_htt *htt)
 {
        del_timer_sync(&htt->rx_ring.refill_retry_timer);
-       tasklet_kill(&htt->txrx_compl_task);
 
        skb_queue_purge(&htt->rx_compl_q);
        skb_queue_purge(&htt->rx_in_ord_compl_q);
@@ -520,9 +518,6 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
        skb_queue_head_init(&htt->tx_fetch_ind_q);
        atomic_set(&htt->num_mpdus_ready, 0);
 
-       tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
-                    (unsigned long)htt);
-
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
                   htt->rx_ring.size, htt->rx_ring.fill_level);
        return 0;
@@ -931,7 +926,7 @@ static void ath10k_process_rx(struct ath10k *ar,
        *status = *rx_status;
 
        ath10k_dbg(ar, ATH10K_DBG_DATA,
-                  "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
+                  "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
                   skb,
                   skb->len,
                   ieee80211_get_SA(hdr),
@@ -958,7 +953,7 @@ static void ath10k_process_rx(struct ath10k *ar,
        trace_ath10k_rx_hdr(ar, skb->data, skb->len);
        trace_ath10k_rx_payload(ar, skb->data, skb->len);
 
-       ieee80211_rx(ar->hw, skb);
+       ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
 }
 
 static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
@@ -1056,9 +1051,11 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
                                          const u8 first_hdr[64])
 {
        struct ieee80211_hdr *hdr;
+       struct htt_rx_desc *rxd;
        size_t hdr_len;
        u8 da[ETH_ALEN];
        u8 sa[ETH_ALEN];
+       int l3_pad_bytes;
 
        /* Delivered decapped frame:
         * [nwifi 802.11 header] <-- replaced with 802.11 hdr
@@ -1072,19 +1069,12 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
         */
 
        /* pull decapped header and copy SA & DA */
-       if ((ar->hw_params.hw_4addr_pad == ATH10K_HW_4ADDR_PAD_BEFORE) &&
-           ieee80211_has_a4(((struct ieee80211_hdr *)first_hdr)->frame_control)) {
-               /* The QCA99X0 4 address mode pad 2 bytes at the
-                * beginning of MSDU
-                */
-               hdr = (struct ieee80211_hdr *)(msdu->data + 2);
-               /* The skb length need be extended 2 as the 2 bytes at the tail
-                * be excluded due to the padding
-                */
-               skb_put(msdu, 2);
-       } else {
-               hdr = (struct ieee80211_hdr *)(msdu->data);
-       }
+       rxd = (void *)msdu->data - sizeof(*rxd);
+
+       l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+       skb_put(msdu, l3_pad_bytes);
+
+       hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
 
        hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
        ether_addr_copy(da, ieee80211_get_DA(hdr));
@@ -1151,6 +1141,8 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
        void *rfc1042;
        u8 da[ETH_ALEN];
        u8 sa[ETH_ALEN];
+       int l3_pad_bytes;
+       struct htt_rx_desc *rxd;
 
        /* Delivered decapped frame:
         * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
@@ -1161,6 +1153,11 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
        if (WARN_ON_ONCE(!rfc1042))
                return;
 
+       rxd = (void *)msdu->data - sizeof(*rxd);
+       l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+       skb_put(msdu, l3_pad_bytes);
+       skb_pull(msdu, l3_pad_bytes);
+
        /* pull decapped header and copy SA & DA */
        eth = (struct ethhdr *)msdu->data;
        ether_addr_copy(da, eth->h_dest);
@@ -1191,6 +1188,8 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
 {
        struct ieee80211_hdr *hdr;
        size_t hdr_len;
+       int l3_pad_bytes;
+       struct htt_rx_desc *rxd;
 
        /* Delivered decapped frame:
         * [amsdu header] <-- replaced with 802.11 hdr
@@ -1198,7 +1197,11 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
         * [payload]
         */
 
-       skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
+       rxd = (void *)msdu->data - sizeof(*rxd);
+       l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+
+       skb_put(msdu, l3_pad_bytes);
+       skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
 
        hdr = (struct ieee80211_hdr *)first_hdr;
        hdr_len = ieee80211_hdrlen(hdr->frame_control);
@@ -1525,9 +1528,9 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar,
 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
-       static struct ieee80211_rx_status rx_status;
+       struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct sk_buff_head amsdu;
-       int ret;
+       int ret, num_msdus;
 
        __skb_queue_head_init(&amsdu);
 
@@ -1549,13 +1552,14 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
                return ret;
        }
 
-       ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
+       num_msdus = skb_queue_len(&amsdu);
+       ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
        ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
-       ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
-       ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
-       ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);
+       ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
+       ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+       ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
 
-       return 0;
+       return num_msdus;
 }
 
 static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
@@ -1579,15 +1583,6 @@ static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
                mpdu_count += mpdu_ranges[i].mpdu_count;
 
        atomic_add(mpdu_count, &htt->num_mpdus_ready);
-
-       tasklet_schedule(&htt->txrx_compl_task);
-}
-
-static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt)
-{
-       atomic_inc(&htt->num_mpdus_ready);
-
-       tasklet_schedule(&htt->txrx_compl_task);
 }
 
 static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
@@ -1772,14 +1767,15 @@ static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
                        RX_FLAG_MMIC_STRIPPED;
 }
 
-static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
-                                      struct sk_buff_head *list)
+static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+                                     struct sk_buff_head *list)
 {
        struct ath10k_htt *htt = &ar->htt;
        struct ieee80211_rx_status *status = &htt->rx_status;
        struct htt_rx_offload_msdu *rx;
        struct sk_buff *msdu;
        size_t offset;
+       int num_msdu = 0;
 
        while ((msdu = __skb_dequeue(list))) {
                /* Offloaded frames don't have Rx descriptor. Instead they have
@@ -1819,10 +1815,12 @@ static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
                ath10k_htt_rx_h_rx_offload_prot(status, msdu);
                ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
                ath10k_process_rx(ar, status, msdu);
+               num_msdu++;
        }
+       return num_msdu;
 }
 
-static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 {
        struct ath10k_htt *htt = &ar->htt;
        struct htt_resp *resp = (void *)skb->data;
@@ -1835,12 +1833,12 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
        u8 tid;
        bool offload;
        bool frag;
-       int ret;
+       int ret, num_msdus = 0;
 
        lockdep_assert_held(&htt->rx_ring.lock);
 
        if (htt->rx_confused)
-               return;
+               return -EIO;
 
        skb_pull(skb, sizeof(resp->hdr));
        skb_pull(skb, sizeof(resp->rx_in_ord_ind));
@@ -1859,7 +1857,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 
        if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
                ath10k_warn(ar, "dropping invalid in order rx indication\n");
-               return;
+               return -EINVAL;
        }
 
        /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
@@ -1870,14 +1868,14 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
        if (ret < 0) {
                ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
                htt->rx_confused = true;
-               return;
+               return -EIO;
        }
 
        /* Offloaded frames are very different and need to be handled
         * separately.
         */
        if (offload)
-               ath10k_htt_rx_h_rx_offload(ar, &list);
+               num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
 
        while (!skb_queue_empty(&list)) {
                __skb_queue_head_init(&amsdu);
@@ -1890,6 +1888,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
                         * better to report something than nothing though. This
                         * should still give an idea about rx rate to the user.
                         */
+                       num_msdus += skb_queue_len(&amsdu);
                        ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
                        ath10k_htt_rx_h_filter(ar, &amsdu, status);
                        ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
@@ -1902,9 +1901,10 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
                        ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
                        htt->rx_confused = true;
                        __skb_queue_purge(&list);
-                       return;
+                       return -EIO;
                }
        }
+       return num_msdus;
 }
 
 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
@@ -2267,7 +2267,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
        }
        case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
                ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
-               tasklet_schedule(&htt->txrx_compl_task);
                break;
        case HTT_T2H_MSG_TYPE_SEC_IND: {
                struct ath10k *ar = htt->ar;
@@ -2284,7 +2283,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
        case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
                ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
                                skb->data, skb->len);
-               ath10k_htt_rx_frag_handler(htt);
+               atomic_inc(&htt->num_mpdus_ready);
                break;
        }
        case HTT_T2H_MSG_TYPE_TEST:
@@ -2320,8 +2319,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                break;
        }
        case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
-               skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
-               tasklet_schedule(&htt->txrx_compl_task);
+               __skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
                return false;
        }
        case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
@@ -2347,7 +2345,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                        break;
                }
                skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
-               tasklet_schedule(&htt->txrx_compl_task);
                break;
        }
        case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
@@ -2376,27 +2373,77 @@ void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
 }
 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
 
-static void ath10k_htt_txrx_compl_task(unsigned long ptr)
+int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
 {
-       struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
-       struct ath10k *ar = htt->ar;
+       struct ath10k_htt *htt = &ar->htt;
        struct htt_tx_done tx_done = {};
-       struct sk_buff_head rx_ind_q;
        struct sk_buff_head tx_ind_q;
        struct sk_buff *skb;
        unsigned long flags;
-       int num_mpdus;
+       int quota = 0, done, num_rx_msdus;
+       bool resched_napi = false;
 
-       __skb_queue_head_init(&rx_ind_q);
        __skb_queue_head_init(&tx_ind_q);
 
-       spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
-       skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
-       spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
+       /* Since an in-order indication can deliver more than one A-MSDU in a
+        * single event, process it first to use the full available quota.
+        */
+       while (quota < budget) {
+               if (skb_queue_empty(&htt->rx_in_ord_compl_q))
+                       break;
 
-       spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
-       skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
-       spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
+               skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
+               if (!skb) {
+                       resched_napi = true;
+                       goto exit;
+               }
+
+               spin_lock_bh(&htt->rx_ring.lock);
+               num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
+               spin_unlock_bh(&htt->rx_ring.lock);
+               if (num_rx_msdus < 0) {
+                       resched_napi = true;
+                       goto exit;
+               }
+
+               dev_kfree_skb_any(skb);
+               if (num_rx_msdus > 0)
+                       quota += num_rx_msdus;
+
+               if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
+                   !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
+                       resched_napi = true;
+                       goto exit;
+               }
+       }
+
+       while (quota < budget) {
+               /* no more data to receive */
+               if (!atomic_read(&htt->num_mpdus_ready))
+                       break;
+
+               num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
+               if (num_rx_msdus < 0) {
+                       resched_napi = true;
+                       goto exit;
+               }
+
+               quota += num_rx_msdus;
+               atomic_dec(&htt->num_mpdus_ready);
+               if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
+                   atomic_read(&htt->num_mpdus_ready)) {
+                       resched_napi = true;
+                       goto exit;
+               }
+       }
+
+       /* From NAPI documentation:
+        *  The napi poll() function may also process TX completions, in which
+        *  case if it processes the entire TX ring then it should count that
+        *  work as the rest of the budget.
+        */
+       if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
+               quota = budget;
 
        /* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
         * From kfifo_get() documentation:
@@ -2406,27 +2453,24 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
        while (kfifo_get(&htt->txdone_fifo, &tx_done))
                ath10k_txrx_tx_unref(htt, &tx_done);
 
-       while ((skb = __skb_dequeue(&tx_ind_q))) {
-               ath10k_htt_rx_tx_fetch_ind(ar, skb);
-               dev_kfree_skb_any(skb);
-       }
-
-       num_mpdus = atomic_read(&htt->num_mpdus_ready);
-
-       while (num_mpdus) {
-               if (ath10k_htt_rx_handle_amsdu(htt))
-                       break;
+       ath10k_mac_tx_push_pending(ar);
 
-               num_mpdus--;
-               atomic_dec(&htt->num_mpdus_ready);
-       }
+       spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
+       skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
+       spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
 
-       while ((skb = __skb_dequeue(&rx_ind_q))) {
-               spin_lock_bh(&htt->rx_ring.lock);
-               ath10k_htt_rx_in_ord_ind(ar, skb);
-               spin_unlock_bh(&htt->rx_ring.lock);
+       while ((skb = __skb_dequeue(&tx_ind_q))) {
+               ath10k_htt_rx_tx_fetch_ind(ar, skb);
                dev_kfree_skb_any(skb);
        }
 
+exit:
        ath10k_htt_rx_msdu_buff_replenish(htt);
+       /* In case of an rx failure or more data left to read, report the full
+        * budget so that the NAPI poll gets rescheduled.
+        */
+       done = resched_napi ? budget : quota;
+
+       return done;
 }
+EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
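The hunk above converts the HTT completion handler to NAPI-style budget accounting: each A-MSDU handler now returns the number of MSDUs it delivered, the poll accumulates that into a quota, and the full budget is reported whenever work remains so the poller gets rescheduled. The following standalone C sketch models only that accounting loop; the fake queue, the helper names and the soft-limit/budget values are illustrative assumptions, not driver code.

/* Build: cc -o quota quota.c */
#include <stdbool.h>
#include <stdio.h>

#define QUOTA_LIMIT 60          /* stand-in for ATH10K_NAPI_QUOTA_LIMIT (value assumed) */

struct fake_queue {
        int pending_amsdus;     /* events still waiting to be processed */
        int msdus_per_amsdu;    /* MSDUs delivered per event */
};

/* plays the role of ath10k_htt_rx_handle_amsdu(): returns MSDUs handled */
static int handle_one_amsdu(struct fake_queue *q)
{
        if (!q->pending_amsdus)
                return 0;
        q->pending_amsdus--;
        return q->msdus_per_amsdu;
}

/* report the full budget when work remains (forces a reschedule),
 * otherwise report the work actually done
 */
static int poll_once(struct fake_queue *q, int budget)
{
        int quota = 0;
        bool resched = false;

        while (quota < budget && q->pending_amsdus) {
                quota += handle_one_amsdu(q);
                if (quota > QUOTA_LIMIT && q->pending_amsdus) {
                        resched = true;
                        break;
                }
        }
        return resched ? budget : quota;
}

int main(void)
{
        struct fake_queue q = { .pending_amsdus = 40, .msdus_per_amsdu = 3 };
        int budget = 64, done;  /* 64 mirrors the assumed NAPI budget */

        do {
                done = poll_once(&q, budget);
                printf("poll reported %d/%d\n", done, budget);
        } while (done == budget);
        return 0;
}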
index 7c072b6..ae5b33f 100644 (file)
@@ -390,8 +390,6 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
 {
        int size;
 
-       tasklet_kill(&htt->txrx_compl_task);
-
        idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
        idr_destroy(&htt->pending_tx);
 
index f903d46..c2ecb9b 100644 (file)
@@ -219,3 +219,16 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
        survey->time = CCNT_TO_MSEC(ar, cc);
        survey->time_busy = CCNT_TO_MSEC(ar, rcc);
 }
+
+const struct ath10k_hw_ops qca988x_ops = {
+};
+
+static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
+{
+       return MS(__le32_to_cpu(rxd->msdu_end.qca99x0.info1),
+                 RX_MSDU_END_INFO1_L3_HDR_PAD);
+}
+
+const struct ath10k_hw_ops qca99x0_ops = {
+       .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
+};
index e014cd7..308e423 100644 (file)
@@ -338,11 +338,6 @@ enum ath10k_hw_rate_rev2_cck {
        ATH10K_HW_RATE_REV2_CCK_SP_11M,
 };
 
-enum ath10k_hw_4addr_pad {
-       ATH10K_HW_4ADDR_PAD_AFTER,
-       ATH10K_HW_4ADDR_PAD_BEFORE,
-};
-
 enum ath10k_hw_cc_wraparound_type {
        ATH10K_HW_CC_WRAP_DISABLED = 0,
 
@@ -363,6 +358,77 @@ enum ath10k_hw_cc_wraparound_type {
        ATH10K_HW_CC_WRAP_SHIFTED_EACH = 2,
 };
 
+struct ath10k_hw_params {
+       u32 id;
+       u16 dev_id;
+       const char *name;
+       u32 patch_load_addr;
+       int uart_pin;
+       u32 otp_exe_param;
+
+       /* Type of hw cycle counter wraparound logic; for more info
+        * refer to enum ath10k_hw_cc_wraparound_type.
+        */
+       enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
+
+       /* Some chips expect the fragment descriptor to be contiguous
+        * memory for any TX operation. Set the continuous_frag_desc flag
+        * for hardware with such a requirement.
+        */
+       bool continuous_frag_desc;
+
+       /* The CCK hardware rate table mapping was revised for newer
+        * chipsets such as QCA99X0 and QCA4019. The CCK h/w rate values
+        * are in proper order with respect to the rate/preamble.
+        */
+       bool cck_rate_map_rev2;
+
+       u32 channel_counters_freq_hz;
+
+       /* Mgmt tx descriptors threshold for limiting probe response
+        * frames.
+        */
+       u32 max_probe_resp_desc_thres;
+
+       u32 tx_chain_mask;
+       u32 rx_chain_mask;
+       u32 max_spatial_stream;
+       u32 cal_data_len;
+
+       struct ath10k_hw_params_fw {
+               const char *dir;
+               const char *board;
+               size_t board_size;
+               size_t board_ext_size;
+       } fw;
+
+       /* qca99x0 family chips deliver broadcast/multicast management
+        * frames encrypted and expect software to do the decryption.
+        */
+       bool sw_decrypt_mcast_mgmt;
+
+       const struct ath10k_hw_ops *hw_ops;
+};
+
+struct htt_rx_desc;
+
+/* Defines needed for Rx descriptor abstraction */
+struct ath10k_hw_ops {
+       int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
+};
+
+extern const struct ath10k_hw_ops qca988x_ops;
+extern const struct ath10k_hw_ops qca99x0_ops;
+
+static inline int
+ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
+                               struct htt_rx_desc *rxd)
+{
+       if (hw->hw_ops->rx_desc_get_l3_pad_bytes)
+               return hw->hw_ops->rx_desc_get_l3_pad_bytes(rxd);
+       return 0;
+}
+
 /* Target specific defines for MAIN firmware */
 #define TARGET_NUM_VDEVS                       8
 #define TARGET_NUM_PEER_AST                    2
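The ath10k_hw_ops table introduced above hides per-chip Rx descriptor quirks behind optional callbacks with a zero default, so QCA988x keeps an empty ops table while QCA99X0 supplies its own L3 padding accessor. A minimal standalone sketch of that pattern follows; rx_desc, hw_params and newchip_get_l3_pad_bytes are made-up names used only for illustration.

#include <stdio.h>

struct rx_desc {                         /* stand-in for struct htt_rx_desc */
        int l3_pad_bytes;
};

struct hw_ops {
        int (*rx_desc_get_l3_pad_bytes)(struct rx_desc *rxd);
};

struct hw_params {
        const char *name;
        const struct hw_ops *hw_ops;
};

/* chip-specific accessor, analogous to the qca99x0 callback above */
static int newchip_get_l3_pad_bytes(struct rx_desc *rxd)
{
        return rxd->l3_pad_bytes;
}

static const struct hw_ops legacy_ops;   /* empty, like qca988x_ops */
static const struct hw_ops newchip_ops = {
        .rx_desc_get_l3_pad_bytes = newchip_get_l3_pad_bytes,
};

/* mirrors ath10k_rx_desc_get_l3_pad_bytes(): a missing callback means 0 */
static int get_l3_pad_bytes(const struct hw_params *hw, struct rx_desc *rxd)
{
        if (hw->hw_ops->rx_desc_get_l3_pad_bytes)
                return hw->hw_ops->rx_desc_get_l3_pad_bytes(rxd);
        return 0;
}

int main(void)
{
        struct rx_desc rxd = { .l3_pad_bytes = 2 };
        struct hw_params legacy  = { "legacy",  &legacy_ops };
        struct hw_params newchip = { "newchip", &newchip_ops };

        printf("%s pad %d\n", legacy.name,  get_l3_pad_bytes(&legacy, &rxd));
        printf("%s pad %d\n", newchip.name, get_l3_pad_bytes(&newchip, &rxd));
        return 0;
}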
index 0bbd0a0..0a44dab 100644 (file)
@@ -824,7 +824,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
                 */
                for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
                        if (ar->peer_map[i] == peer) {
-                               ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %p idx %d)\n",
+                               ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
                                            peer->addr, peer, i);
                                ar->peer_map[i] = NULL;
                        }
@@ -3255,6 +3255,8 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
        if (ar->htt.target_version_major < 3 &&
            (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
            !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+                     ar->running_fw->fw_file.fw_features) &&
+           !test_bit(ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR,
                      ar->running_fw->fw_file.fw_features))
                return ATH10K_HW_TXRX_MGMT;
 
@@ -3524,7 +3526,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
 
        if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
                if (!ath10k_mac_tx_frm_has_freq(ar)) {
-                       ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
+                       ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
                                   skb);
 
                        skb_queue_tail(&ar->offchan_tx_queue, skb);
@@ -3586,7 +3588,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
 
                mutex_lock(&ar->conf_mutex);
 
-               ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
+               ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
                           skb);
 
                hdr = (struct ieee80211_hdr *)skb->data;
@@ -3643,7 +3645,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
                time_left =
                wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
                if (time_left == 0)
-                       ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
+                       ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
                                    skb);
 
                if (!peer && tmp_peer_created) {
@@ -3777,7 +3779,9 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
        enum ath10k_hw_txrx_mode txmode;
        enum ath10k_mac_tx_path txpath;
        struct sk_buff *skb;
+       struct ieee80211_hdr *hdr;
        size_t skb_len;
+       bool is_mgmt, is_presp;
        int ret;
 
        spin_lock_bh(&ar->htt.tx_lock);
@@ -3801,6 +3805,22 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
        skb_len = skb->len;
        txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
        txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+       is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
+
+       if (is_mgmt) {
+               hdr = (struct ieee80211_hdr *)skb->data;
+               is_presp = ieee80211_is_probe_resp(hdr->frame_control);
+
+               spin_lock_bh(&ar->htt.tx_lock);
+               ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+
+               if (ret) {
+                       ath10k_htt_tx_dec_pending(htt);
+                       spin_unlock_bh(&ar->htt.tx_lock);
+                       return ret;
+               }
+               spin_unlock_bh(&ar->htt.tx_lock);
+       }
 
        ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
        if (unlikely(ret)) {
@@ -3808,6 +3828,8 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
 
                spin_lock_bh(&ar->htt.tx_lock);
                ath10k_htt_tx_dec_pending(htt);
+               if (is_mgmt)
+                       ath10k_htt_tx_mgmt_dec_pending(htt);
                spin_unlock_bh(&ar->htt.tx_lock);
 
                return ret;
@@ -3894,7 +3916,7 @@ void __ath10k_scan_finish(struct ath10k *ar)
                ar->scan.roc_freq = 0;
                ath10k_offchan_tx_purge(ar);
                cancel_delayed_work(&ar->scan.timeout);
-               complete_all(&ar->scan.completed);
+               complete(&ar->scan.completed);
                break;
        }
 }
@@ -4100,13 +4122,29 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
 {
        struct ath10k *ar = hw->priv;
        struct ath10k_txq *artxq = (void *)txq->drv_priv;
+       struct ieee80211_txq *f_txq;
+       struct ath10k_txq *f_artxq;
+       int ret = 0;
+       int max = 16;
 
        spin_lock_bh(&ar->txqs_lock);
        if (list_empty(&artxq->list))
                list_add_tail(&artxq->list, &ar->txqs);
+
+       f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
+       f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
+       list_del_init(&f_artxq->list);
+
+       while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
+               ret = ath10k_mac_tx_push_txq(hw, f_txq);
+               if (ret)
+                       break;
+       }
+       if (ret != -ENOENT)
+               list_add_tail(&f_artxq->list, &ar->txqs);
        spin_unlock_bh(&ar->txqs_lock);
 
-       ath10k_mac_tx_push_pending(ar);
+       ath10k_htt_tx_txq_update(hw, f_txq);
        ath10k_htt_tx_txq_update(hw, txq);
 }
 
@@ -5186,7 +5224,7 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
 
        ret = ath10k_monitor_recalc(ar);
        if (ret)
-               ath10k_warn(ar, "failed to recalc montior: %d\n", ret);
+               ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
 
        mutex_unlock(&ar->conf_mutex);
 }
@@ -5984,8 +6022,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                 * Existing station deletion.
                 */
                ath10k_dbg(ar, ATH10K_DBG_MAC,
-                          "mac vdev %d peer delete %pM (sta gone)\n",
-                          arvif->vdev_id, sta->addr);
+                          "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
+                          arvif->vdev_id, sta->addr, sta);
 
                ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
                if (ret)
@@ -6001,7 +6039,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                                continue;
 
                        if (peer->sta == sta) {
-                               ath10k_warn(ar, "found sta peer %pM (ptr %p id %d) entry on vdev %i after it was supposedly removed\n",
+                               ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
                                            sta->addr, peer, i, arvif->vdev_id);
                                peer->sta = NULL;
 
@@ -6538,7 +6576,7 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
                goto exit;
        }
 
-       ath10k_mac_update_bss_chan_survey(ar, survey->channel);
+       ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
 
        spin_lock_bh(&ar->data_lock);
        memcpy(survey, ar_survey, sizeof(*survey));
@@ -7134,7 +7172,7 @@ ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
        struct ath10k *ar = hw->priv;
 
        ath10k_dbg(ar, ATH10K_DBG_MAC,
-                  "mac chanctx add freq %hu width %d ptr %p\n",
+                  "mac chanctx add freq %hu width %d ptr %pK\n",
                   ctx->def.chan->center_freq, ctx->def.width, ctx);
 
        mutex_lock(&ar->conf_mutex);
@@ -7158,7 +7196,7 @@ ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
        struct ath10k *ar = hw->priv;
 
        ath10k_dbg(ar, ATH10K_DBG_MAC,
-                  "mac chanctx remove freq %hu width %d ptr %p\n",
+                  "mac chanctx remove freq %hu width %d ptr %pK\n",
                   ctx->def.chan->center_freq, ctx->def.width, ctx);
 
        mutex_lock(&ar->conf_mutex);
@@ -7223,7 +7261,7 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
        mutex_lock(&ar->conf_mutex);
 
        ath10k_dbg(ar, ATH10K_DBG_MAC,
-                  "mac chanctx change freq %hu width %d ptr %p changed %x\n",
+                  "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
                   ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
 
        /* This shouldn't really happen because channel switching should use
@@ -7281,7 +7319,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
        mutex_lock(&ar->conf_mutex);
 
        ath10k_dbg(ar, ATH10K_DBG_MAC,
-                  "mac chanctx assign ptr %p vdev_id %i\n",
+                  "mac chanctx assign ptr %pK vdev_id %i\n",
                   ctx, arvif->vdev_id);
 
        if (WARN_ON(arvif->is_started)) {
@@ -7342,7 +7380,7 @@ ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
        mutex_lock(&ar->conf_mutex);
 
        ath10k_dbg(ar, ATH10K_DBG_MAC,
-                  "mac chanctx unassign ptr %p vdev_id %i\n",
+                  "mac chanctx unassign ptr %pK vdev_id %i\n",
                   ctx, arvif->vdev_id);
 
        WARN_ON(!arvif->is_started);
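The wake_tx_queue hunk earlier in this file changes the handler from merely queueing work to actively serving the head of ar->txqs: it pops the first queue, pushes up to 16 frames from it, and re-appends it only if frames remain (an -ENOENT push result means the queue drained). The standalone sketch below models that bounded round-robin with a plain linked list; every name in it is hypothetical, not a driver symbol.

#include <stdio.h>

#define MAX_PUSH 16                      /* matches the 'max = 16' bound above */

struct txq {
        int frames;                      /* frames waiting in this queue */
        struct txq *next;                /* singly linked list of active queues */
};

/* pop the head queue, push at most MAX_PUSH frames from it, and re-append it
 * unless it drained; returns the new list head
 */
static struct txq *serve_head(struct txq *head)
{
        struct txq *q = head, *tail;
        int pushed = 0;

        head = q->next;
        q->next = NULL;

        while (q->frames && pushed < MAX_PUSH) {
                q->frames--;             /* "push" one frame to firmware */
                pushed++;
        }
        printf("pushed %d, %d left\n", pushed, q->frames);

        if (!q->frames)
                return head;             /* drained: drop it from the rotation */
        if (!head)
                return q;                /* it was the only active queue */

        for (tail = head; tail->next; tail = tail->next)
                ;
        tail->next = q;                  /* re-append at the tail */
        return head;
}

int main(void)
{
        struct txq b = { .frames = 5,  .next = NULL };
        struct txq a = { .frames = 40, .next = &b };
        struct txq *list = &a;

        while (list)
                list = serve_head(list);
        return 0;
}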
index 9a22c47..0457e31 100644 (file)
@@ -1506,12 +1506,10 @@ void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
        ath10k_ce_per_engine_service(ar, pipe);
 }
 
-void ath10k_pci_kill_tasklet(struct ath10k *ar)
+static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-       tasklet_kill(&ar_pci->intr_tq);
-
        del_timer_sync(&ar_pci->rx_post_retry);
 }
 
@@ -1570,7 +1568,7 @@ void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                                 ul_pipe, dl_pipe);
 }
 
-static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
+void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
 {
        u32 val;
 
@@ -1693,14 +1691,12 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
 {
        struct ath10k *ar;
-       struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_pipe;
        struct ath10k_ce_ring *ce_ring;
        struct sk_buff *skb;
        int i;
 
        ar = pci_pipe->hif_ce_state;
-       ar_pci = ath10k_pci_priv(ar);
        ce_pipe = pci_pipe->ce_hdl;
        ce_ring = ce_pipe->src_ring;
 
@@ -1753,7 +1749,7 @@ void ath10k_pci_ce_deinit(struct ath10k *ar)
 
 void ath10k_pci_flush(struct ath10k *ar)
 {
-       ath10k_pci_kill_tasklet(ar);
+       ath10k_pci_rx_retry_sync(ar);
        ath10k_pci_buffer_cleanup(ar);
 }
 
@@ -1780,6 +1776,8 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
        ath10k_pci_irq_disable(ar);
        ath10k_pci_irq_sync(ar);
        ath10k_pci_flush(ar);
+       napi_synchronize(&ar->napi);
+       napi_disable(&ar->napi);
 
        spin_lock_irqsave(&ar_pci->ps_lock, flags);
        WARN_ON(ar_pci->ps_wake_refcount > 0);
@@ -2533,6 +2531,7 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
                ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
                goto err_ce;
        }
+       napi_enable(&ar->napi);
 
        return 0;
 
@@ -2725,7 +2724,7 @@ static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
        return 0;
 
 err_free:
-       kfree(data);
+       kfree(caldata);
 
        return -EINVAL;
 }
@@ -2772,35 +2771,53 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
                return IRQ_NONE;
        }
 
-       if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) {
-               if (!ath10k_pci_irq_pending(ar))
-                       return IRQ_NONE;
-
-               ath10k_pci_disable_and_clear_legacy_irq(ar);
-       }
+       if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
+           !ath10k_pci_irq_pending(ar))
+               return IRQ_NONE;
 
-       tasklet_schedule(&ar_pci->intr_tq);
+       ath10k_pci_disable_and_clear_legacy_irq(ar);
+       ath10k_pci_irq_msi_fw_mask(ar);
+       napi_schedule(&ar->napi);
 
        return IRQ_HANDLED;
 }
 
-static void ath10k_pci_tasklet(unsigned long data)
+static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
 {
-       struct ath10k *ar = (struct ath10k *)data;
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+       int done = 0;
 
        if (ath10k_pci_has_fw_crashed(ar)) {
-               ath10k_pci_irq_disable(ar);
                ath10k_pci_fw_crashed_clear(ar);
                ath10k_pci_fw_crashed_dump(ar);
-               return;
+               napi_complete(ctx);
+               return done;
        }
 
        ath10k_ce_per_engine_service_any(ar);
 
-       /* Re-enable legacy irq that was disabled in the irq handler */
-       if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
+       done = ath10k_htt_txrx_compl_task(ar, budget);
+
+       if (done < budget) {
+               napi_complete(ctx);
+               /* In case of MSI, it is possible that interrupts are received
+                * while the NAPI poll is in progress. Pending interrupts that
+                * arrive after all copy engine pipes have been processed by
+                * the NAPI poll would then never be handled again, which was
+                * causing a failure to complete the boot sequence on x86
+                * platforms. So before enabling interrupts it is safer to
+                * check for pending interrupts and service them immediately.
+                */
+               if (CE_INTERRUPT_SUMMARY(ar)) {
+                       napi_reschedule(ctx);
+                       goto out;
+               }
                ath10k_pci_enable_legacy_irq(ar);
+               ath10k_pci_irq_msi_fw_unmask(ar);
+       }
+
+out:
+       return done;
 }
 
 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
@@ -2858,11 +2875,10 @@ static void ath10k_pci_free_irq(struct ath10k *ar)
        free_irq(ar_pci->pdev->irq, ar);
 }
 
-void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
+void ath10k_pci_init_napi(struct ath10k *ar)
 {
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-       tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
+       netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
+                      ATH10K_NAPI_BUDGET);
 }
 
 static int ath10k_pci_init_irq(struct ath10k *ar)
@@ -2870,7 +2886,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;
 
-       ath10k_pci_init_irq_tasklets(ar);
+       ath10k_pci_init_napi(ar);
 
        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
                ath10k_info(ar, "limiting irq mode to: %d\n",
@@ -3062,7 +3078,7 @@ static int ath10k_pci_claim(struct ath10k *ar)
                goto err_master;
        }
 
-       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
        return 0;
 
 err_master:
@@ -3131,7 +3147,8 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
 
 void ath10k_pci_release_resource(struct ath10k *ar)
 {
-       ath10k_pci_kill_tasklet(ar);
+       ath10k_pci_rx_retry_sync(ar);
+       netif_napi_del(&ar->napi);
        ath10k_pci_ce_deinit(ar);
        ath10k_pci_free_pipes(ar);
 }
@@ -3162,7 +3179,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                pci_hard_reset = ath10k_pci_qca988x_chip_reset;
                break;
        case QCA9887_1_0_DEVICE_ID:
-               dev_warn(&pdev->dev, "QCA9887 support is still experimental, there are likely bugs. You have been warned.\n");
                hw_rev = ATH10K_HW_QCA9887;
                pci_ps = false;
                pci_soft_reset = ath10k_pci_warm_reset;
@@ -3298,7 +3314,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 
 err_free_irq:
        ath10k_pci_free_irq(ar);
-       ath10k_pci_kill_tasklet(ar);
+       ath10k_pci_rx_retry_sync(ar);
 
 err_deinit_irq:
        ath10k_pci_deinit_irq(ar);
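With the tasklet gone, the pci.c changes above mask interrupts in the hard IRQ handler, schedule NAPI, and only unmask again once the poll finishes under budget, after re-checking the copy engine summary so an interrupt that fired mid-poll is not lost (the MSI boot stall described in the comment). Here is a small standalone model of that recheck-before-unmask decision; the counters stand in for CE_INTERRUPT_SUMMARY() and are not driver state.

#include <stdbool.h>
#include <stdio.h>

static bool irq_enabled;
static int ce_pending;                   /* stand-in for CE_INTERRUPT_SUMMARY() */

/* service up to 'budget' units of pending copy engine work */
static int service_engines(int budget)
{
        int done = ce_pending < budget ? ce_pending : budget;

        ce_pending -= done;
        return done;
}

/* returns true when the poll must run again (i.e. stays scheduled) */
static bool napi_poll(int budget)
{
        int done = service_engines(budget);

        if (done == budget)
                return true;             /* budget exhausted: stay scheduled */

        /* work may have arrived after the last pass; reschedule instead of
         * unmasking so nothing is serviced late
         */
        if (ce_pending)
                return true;

        irq_enabled = true;              /* nothing pending: safe to unmask */
        return false;
}

int main(void)
{
        ce_pending = 100;                /* pretend the IRQ handler masked and scheduled us */
        irq_enabled = false;

        while (napi_poll(64))
                ;
        printf("irq re-enabled: %s, pending: %d\n",
               irq_enabled ? "yes" : "no", ce_pending);
        return 0;
}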
index 6eca1df..9854ad5 100644 (file)
@@ -177,8 +177,6 @@ struct ath10k_pci {
        /* Operating interrupt mode */
        enum ath10k_pci_irq_mode oper_irq_mode;
 
-       struct tasklet_struct intr_tq;
-
        struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
 
        /* Copy Engine used for Diagnostic Accesses */
@@ -294,8 +292,7 @@ void ath10k_pci_free_pipes(struct ath10k *ar);
 void ath10k_pci_free_pipes(struct ath10k *ar);
 void ath10k_pci_rx_replenish_retry(unsigned long ptr);
 void ath10k_pci_ce_deinit(struct ath10k *ar);
-void ath10k_pci_init_irq_tasklets(struct ath10k *ar);
-void ath10k_pci_kill_tasklet(struct ath10k *ar);
+void ath10k_pci_init_napi(struct ath10k *ar);
 int ath10k_pci_init_pipes(struct ath10k *ar);
 int ath10k_pci_init_config(struct ath10k *ar);
 void ath10k_pci_rx_post(struct ath10k *ar);
@@ -303,6 +300,7 @@ void ath10k_pci_flush(struct ath10k *ar);
 void ath10k_pci_enable_legacy_irq(struct ath10k *ar);
 bool ath10k_pci_irq_pending(struct ath10k *ar);
 void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar);
+void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar);
 int ath10k_pci_wait_for_target_init(struct ath10k *ar);
 int ath10k_pci_setup_resource(struct ath10k *ar);
 void ath10k_pci_release_resource(struct ath10k *ar);
index 0c5f586..adf4592 100644 (file)
@@ -134,17 +134,18 @@ ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
        return seg_info;
 }
 
-int ath10k_swap_code_seg_configure(struct ath10k *ar)
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+                                  const struct ath10k_fw_file *fw_file)
 {
        int ret;
        struct ath10k_swap_code_seg_info *seg_info = NULL;
 
-       if (!ar->swap.firmware_swap_code_seg_info)
+       if (!fw_file->firmware_swap_code_seg_info)
                return 0;
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
 
-       seg_info = ar->swap.firmware_swap_code_seg_info;
+       seg_info = fw_file->firmware_swap_code_seg_info;
 
        ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
                                      &seg_info->seg_hw_info,
@@ -158,28 +159,29 @@ int ath10k_swap_code_seg_configure(struct ath10k *ar)
        return 0;
 }
 
-void ath10k_swap_code_seg_release(struct ath10k *ar)
+void ath10k_swap_code_seg_release(struct ath10k *ar,
+                                 struct ath10k_fw_file *fw_file)
 {
-       ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
+       ath10k_swap_code_seg_free(ar, fw_file->firmware_swap_code_seg_info);
 
        /* FIXME: these two assignments look to be in the wrong place! Shouldn't
         * they be in ath10k_core_free_firmware_files() like the rest?
         */
-       ar->normal_mode_fw.fw_file.codeswap_data = NULL;
-       ar->normal_mode_fw.fw_file.codeswap_len = 0;
+       fw_file->codeswap_data = NULL;
+       fw_file->codeswap_len = 0;
 
-       ar->swap.firmware_swap_code_seg_info = NULL;
+       fw_file->firmware_swap_code_seg_info = NULL;
 }
 
-int ath10k_swap_code_seg_init(struct ath10k *ar)
+int ath10k_swap_code_seg_init(struct ath10k *ar, struct ath10k_fw_file *fw_file)
 {
        int ret;
        struct ath10k_swap_code_seg_info *seg_info;
        const void *codeswap_data;
        size_t codeswap_len;
 
-       codeswap_data = ar->normal_mode_fw.fw_file.codeswap_data;
-       codeswap_len = ar->normal_mode_fw.fw_file.codeswap_len;
+       codeswap_data = fw_file->codeswap_data;
+       codeswap_len = fw_file->codeswap_len;
 
        if (!codeswap_len || !codeswap_data)
                return 0;
@@ -200,7 +202,7 @@ int ath10k_swap_code_seg_init(struct ath10k *ar)
                return ret;
        }
 
-       ar->swap.firmware_swap_code_seg_info = seg_info;
+       fw_file->firmware_swap_code_seg_info = seg_info;
 
        return 0;
 }
index 36991c7..f5dc047 100644 (file)
@@ -23,6 +23,8 @@
 /* Currently only one swap segment is supported */
 #define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED     1
 
+struct ath10k_fw_file;
+
 struct ath10k_swap_code_seg_tlv {
        __le32 address;
        __le32 length;
@@ -58,8 +60,11 @@ struct ath10k_swap_code_seg_info {
        dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
 };
 
-int ath10k_swap_code_seg_configure(struct ath10k *ar);
-void ath10k_swap_code_seg_release(struct ath10k *ar);
-int ath10k_swap_code_seg_init(struct ath10k *ar);
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+                                  const struct ath10k_fw_file *fw_file);
+void ath10k_swap_code_seg_release(struct ath10k *ar,
+                                 struct ath10k_fw_file *fw_file);
+int ath10k_swap_code_seg_init(struct ath10k *ar,
+                             struct ath10k_fw_file *fw_file);
 
 #endif
index 120f423..ed85f93 100644 (file)
@@ -23,6 +23,7 @@
 #include "wmi.h"
 #include "hif.h"
 #include "hw.h"
+#include "core.h"
 
 #include "testmode_i.h"
 
@@ -45,7 +46,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
        int ret;
 
        ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
-                  "testmode event wmi cmd_id %d skb %p skb->len %d\n",
+                  "testmode event wmi cmd_id %d skb %pK skb->len %d\n",
                   cmd_id, skb, skb->len);
 
        ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
@@ -240,6 +241,18 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
                goto err;
        }
 
+       if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+           ar->testmode.utf_mode_fw.fw_file.codeswap_len) {
+               ret = ath10k_swap_code_seg_init(ar,
+                                               &ar->testmode.utf_mode_fw.fw_file);
+               if (ret) {
+                       ath10k_warn(ar,
+                                   "failed to init utf code swap segment: %d\n",
+                                   ret);
+                       goto err_release_utf_mode_fw;
+               }
+       }
+
        spin_lock_bh(&ar->data_lock);
        ar->testmode.utf_monitor = true;
        spin_unlock_bh(&ar->data_lock);
@@ -279,6 +292,11 @@ err_power_down:
        ath10k_hif_power_down(ar);
 
 err_release_utf_mode_fw:
+       if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+           ar->testmode.utf_mode_fw.fw_file.codeswap_len)
+               ath10k_swap_code_seg_release(ar,
+                                            &ar->testmode.utf_mode_fw.fw_file);
+
        release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
        ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
 
@@ -301,6 +319,11 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
 
        spin_unlock_bh(&ar->data_lock);
 
+       if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+           ar->testmode.utf_mode_fw.fw_file.codeswap_len)
+               ath10k_swap_code_seg_release(ar,
+                                            &ar->testmode.utf_mode_fw.fw_file);
+
        release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
        ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
 
@@ -360,7 +383,7 @@ static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
        cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
 
        ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
-                  "testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
+                  "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
                   cmd_id, buf, buf_len);
 
        ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
index 444b52c..0a47269 100644 (file)
@@ -192,7 +192,7 @@ int ath10k_thermal_register(struct ath10k *ar)
 
        /* Avoid linking error on devm_hwmon_device_register_with_groups, I
         * guess linux/hwmon.h is missing proper stubs. */
-       if (!config_enabled(CONFIG_HWMON))
+       if (!IS_REACHABLE(CONFIG_HWMON))
                return 0;
 
        hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
index b29a86a..9852c5d 100644 (file)
@@ -44,7 +44,7 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
        complete(&ar->offchan_tx_completed);
        ar->offchan_tx_skb = NULL; /* just for sanity */
 
-       ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
+       ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
 out:
        spin_unlock_bh(&ar->data_lock);
 }
@@ -119,8 +119,6 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
        ieee80211_tx_status(htt->ar->hw, msdu);
        /* we do not own the msdu anymore */
 
-       ath10k_mac_tx_push_pending(ar);
-
        return 0;
 }
 
index 64ebd30..c9a8bb1 100644 (file)
@@ -51,6 +51,8 @@ struct wmi_ops {
                            struct wmi_roam_ev_arg *arg);
        int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
                              struct wmi_wow_ev_arg *arg);
+       int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
+                           struct wmi_echo_ev_arg *arg);
        enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
 
        struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
@@ -123,7 +125,7 @@ struct wmi_ops {
                                             enum wmi_force_fw_hang_type type,
                                             u32 delay_ms);
        struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
-       struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
+       struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
                                          u32 log_level);
        struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
        struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
@@ -194,6 +196,7 @@ struct wmi_ops {
        struct sk_buff *(*gen_pdev_bss_chan_info_req)
                                        (struct ath10k *ar,
                                         enum wmi_bss_survey_req_type type);
+       struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
 };
 
 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -349,6 +352,16 @@ ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
        return ar->wmi.ops->pull_wow_event(ar, skb, arg);
 }
 
+static inline int
+ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
+                       struct wmi_echo_ev_arg *arg)
+{
+       if (!ar->wmi.ops->pull_echo_ev)
+               return -EOPNOTSUPP;
+
+       return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
+}
+
 static inline enum wmi_txbf_conf
 ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
 {
@@ -932,7 +945,7 @@ ath10k_wmi_force_fw_hang(struct ath10k *ar,
 }
 
 static inline int
-ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
+ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
 {
        struct sk_buff *skb;
 
@@ -1382,4 +1395,20 @@ ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
                                   wmi->cmd->pdev_bss_chan_info_request_cmdid);
 }
 
+static inline int
+ath10k_wmi_echo(struct ath10k *ar, u32 value)
+{
+       struct ath10k_wmi *wmi = &ar->wmi;
+       struct sk_buff *skb;
+
+       if (!wmi->ops->gen_echo)
+               return -EOPNOTSUPP;
+
+       skb = wmi->ops->gen_echo(ar, value);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
+}
+
 #endif
index e09337e..e64f593 100644 (file)
@@ -1223,6 +1223,33 @@ ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
        return 0;
 }
 
+static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
+                                         struct sk_buff *skb,
+                                         struct wmi_echo_ev_arg *arg)
+{
+       const void **tb;
+       const struct wmi_echo_event *ev;
+       int ret;
+
+       tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+       if (IS_ERR(tb)) {
+               ret = PTR_ERR(tb);
+               ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+               return ret;
+       }
+
+       ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
+       if (!ev) {
+               kfree(tb);
+               return -EPROTO;
+       }
+
+       arg->value = ev->value;
+
+       kfree(tb);
+       return 0;
+}
+
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
 {
@@ -2441,7 +2468,7 @@ ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
 }
 
 static struct sk_buff *
-ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
                                 u32 log_level) {
        struct wmi_tlv_dbglog_cmd *cmd;
        struct wmi_tlv *tlv;
@@ -3081,6 +3108,34 @@ ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
        return skb;
 }
 
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
+{
+       struct wmi_echo_cmd *cmd;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       void *ptr;
+       size_t len;
+
+       len = sizeof(*tlv) + sizeof(*cmd);
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       ptr = (void *)skb->data;
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+       cmd = (void *)tlv->value;
+       cmd->value = cpu_to_le32(value);
+
+       ptr += sizeof(*tlv);
+       ptr += sizeof(*cmd);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
+       return skb;
+}
+
 /****************/
 /* TLV mappings */
 /****************/
@@ -3429,6 +3484,7 @@ static const struct wmi_ops wmi_tlv_ops = {
        .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
        .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
        .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
+       .pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
        .get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
 
        .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
@@ -3485,6 +3541,7 @@ static const struct wmi_ops wmi_tlv_ops = {
        .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
        .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
        .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+       .gen_echo = ath10k_wmi_tlv_op_gen_echo,
 };
 
 static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
index d246288..38993d7 100644 (file)
@@ -29,6 +29,9 @@
 #include "p2p.h"
 #include "hw.h"
 
+#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
+#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
+
 /* MAIN WMI cmd track */
 static struct wmi_cmd_map wmi_cmd_map = {
        .init_cmdid = WMI_INIT_CMDID,
@@ -1874,7 +1877,7 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
        ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
        memcpy(cmd->buf, msdu->data, msdu->len);
 
-       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
                   msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
                   fc & IEEE80211_FCTL_STYPE);
        trace_ath10k_tx_hdr(ar, skb->data, skb->len);
@@ -2240,6 +2243,29 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
        return 0;
 }
 
+static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
+                                      struct ieee80211_hdr *hdr)
+{
+       if (!ieee80211_has_protected(hdr->frame_control))
+               return false;
+
+       /* FW delivers WEP Shared Auth frame with Protected Bit set and
+        * encrypted payload. However in case of PMF it delivers decrypted
+        * frames with Protected Bit set.
+        */
+       if (ieee80211_is_auth(hdr->frame_control))
+               return false;
+
+       /* qca99x0 based FW delivers broadcast or multicast management frames
+        * (e.g. group privacy action frames in mesh) as encrypted payload.
+        */
+       if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
+           ar->hw_params.sw_decrypt_mcast_mgmt)
+               return false;
+
+       return true;
+}
+
 int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
 {
        struct wmi_mgmt_rx_ev_arg arg = {};
@@ -2326,11 +2352,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
 
        ath10k_wmi_handle_wep_reauth(ar, skb, status);
 
-       /* FW delivers WEP Shared Auth frame with Protected Bit set and
-        * encrypted payload. However in case of PMF it delivers decrypted
-        * frames with Protected Bit set. */
-       if (ieee80211_has_protected(hdr->frame_control) &&
-           !ieee80211_is_auth(hdr->frame_control)) {
+       if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
                status->flag |= RX_FLAG_DECRYPTED;
 
                if (!ieee80211_is_action(hdr->frame_control) &&
@@ -2347,7 +2369,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
                ath10k_mac_handle_beacon(ar, skb);
 
        ath10k_dbg(ar, ATH10K_DBG_MGMT,
-                  "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
+                  "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
                   skb, skb->len,
                   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
 
@@ -2495,7 +2517,21 @@ exit:
 
 void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
 {
-       ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
+       struct wmi_echo_ev_arg arg = {};
+       int ret;
+
+       ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
+       if (ret) {
+               ath10k_warn(ar, "failed to parse echo: %d\n", ret);
+               return;
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI,
+                  "wmi event echo value 0x%08x\n",
+                  le32_to_cpu(arg.value));
+
+       if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID)
+               complete(&ar->wmi.barrier);
 }
 
 int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
@@ -3527,7 +3563,6 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
                                ath10k_warn(ar, "failed to map beacon: %d\n",
                                            ret);
                                dev_kfree_skb_any(bcn);
-                               ret = -EIO;
                                goto skip;
                        }
 
@@ -4792,6 +4827,17 @@ static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
        return 0;
 }
 
+static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar,
+                                     struct sk_buff *skb,
+                                     struct wmi_echo_ev_arg *arg)
+{
+       struct wmi_echo_event *ev = (void *)skb->data;
+
+       arg->value = ev->value;
+
+       return 0;
+}
+
 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
 {
        struct wmi_rdy_ev_arg arg = {};
@@ -5124,6 +5170,7 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
 {
        struct wmi_cmd_hdr *cmd_hdr;
        enum wmi_10_2_event_id id;
+       bool consumed;
 
        cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
        id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@@ -5133,6 +5180,18 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
 
        trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
 
+       consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+       /* The ready event must be handled normally even in UTF mode so that
+        * we know the UTF firmware has booted; all other WMI events are just
+        * passed through to testmode.
+        */
+       if (consumed && id != WMI_10_2_READY_EVENTID) {
+               ath10k_dbg(ar, ATH10K_DBG_WMI,
+                          "wmi testmode consumed 0x%x\n", id);
+               goto out;
+       }
+
        switch (id) {
        case WMI_10_2_MGMT_RX_EVENTID:
                ath10k_wmi_event_mgmt_rx(ar, skb);
@@ -5248,6 +5307,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
 {
        struct wmi_cmd_hdr *cmd_hdr;
        enum wmi_10_4_event_id id;
+       bool consumed;
 
        cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
        id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@@ -5257,6 +5317,18 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
 
        trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
 
+       consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+       /* The ready event must be handled normally even in UTF mode so that
+        * we know the UTF firmware has booted; all other WMI events are just
+        * passed through to testmode.
+        */
+       if (consumed && id != WMI_10_4_READY_EVENTID) {
+               ath10k_dbg(ar, ATH10K_DBG_WMI,
+                          "wmi testmode consumed 0x%x\n", id);
+               goto out;
+       }
+
        switch (id) {
        case WMI_10_4_MGMT_RX_EVENTID:
                ath10k_wmi_event_mgmt_rx(ar, skb);
@@ -5306,6 +5378,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
                break;
        case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
        case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
+       case WMI_10_4_WDS_PEER_EVENTID:
                ath10k_dbg(ar, ATH10K_DBG_WMI,
                           "received event id %d not implemented\n", id);
                break;
@@ -6863,7 +6936,7 @@ ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
 }
 
 static struct sk_buff *
-ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
                             u32 log_level)
 {
        struct wmi_dbglog_cfg_cmd *cmd;
@@ -6900,6 +6973,44 @@ ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
        return skb;
 }
 
+static struct sk_buff *
+ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
+                                 u32 log_level)
+{
+       struct wmi_10_4_dbglog_cfg_cmd *cmd;
+       struct sk_buff *skb;
+       u32 cfg;
+
+       skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;
+
+       if (module_enable) {
+               cfg = SM(log_level,
+                        ATH10K_DBGLOG_CFG_LOG_LVL);
+       } else {
+               /* set back defaults, all modules with WARN level */
+               cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
+                        ATH10K_DBGLOG_CFG_LOG_LVL);
+               module_enable = ~0;
+       }
+
+       cmd->module_enable = __cpu_to_le64(module_enable);
+       cmd->module_valid = __cpu_to_le64(~0);
+       cmd->config_enable = __cpu_to_le32(cfg);
+       cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI,
+                  "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n",
+                  __le64_to_cpu(cmd->module_enable),
+                  __le64_to_cpu(cmd->module_valid),
+                  __le32_to_cpu(cmd->config_enable),
+                  __le32_to_cpu(cmd->config_valid));
+       return skb;
+}
+
 static struct sk_buff *
 ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
 {
@@ -7649,6 +7760,48 @@ ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
        return skb;
 }
 
+static struct sk_buff *
+ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
+{
+       struct wmi_echo_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       cmd = (struct wmi_echo_cmd *)skb->data;
+       cmd->value = cpu_to_le32(value);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI,
+                  "wmi echo value 0x%08x\n", value);
+       return skb;
+}
+
+int
+ath10k_wmi_barrier(struct ath10k *ar)
+{
+       int ret;
+       int time_left;
+
+       spin_lock_bh(&ar->data_lock);
+       reinit_completion(&ar->wmi.barrier);
+       spin_unlock_bh(&ar->data_lock);
+
+       ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
+       if (ret) {
+               ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret);
+               return ret;
+       }
+
+       time_left = wait_for_completion_timeout(&ar->wmi.barrier,
+                                               ATH10K_WMI_BARRIER_TIMEOUT_HZ);
+       if (!time_left)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
 static const struct wmi_ops wmi_ops = {
        .rx = ath10k_wmi_op_rx,
        .map_svc = wmi_main_svc_map,
@@ -7665,6 +7818,7 @@ static const struct wmi_ops wmi_ops = {
        .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
        .pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
        .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+       .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
 
        .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
        .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7709,6 +7863,7 @@ static const struct wmi_ops wmi_ops = {
        .gen_delba_send = ath10k_wmi_op_gen_delba_send,
        .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
        .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+       .gen_echo = ath10k_wmi_op_gen_echo,
        /* .gen_bcn_tmpl not implemented */
        /* .gen_prb_tmpl not implemented */
        /* .gen_p2p_go_bcn_ie not implemented */
@@ -7738,6 +7893,7 @@ static const struct wmi_ops wmi_10_1_ops = {
        .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
        .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
        .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+       .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
 
        .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
        .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7777,6 +7933,7 @@ static const struct wmi_ops wmi_10_1_ops = {
        .gen_delba_send = ath10k_wmi_op_gen_delba_send,
        .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
        .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+       .gen_echo = ath10k_wmi_op_gen_echo,
        /* .gen_bcn_tmpl not implemented */
        /* .gen_prb_tmpl not implemented */
        /* .gen_p2p_go_bcn_ie not implemented */
@@ -7796,6 +7953,7 @@ static const struct wmi_ops wmi_10_2_ops = {
        .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
        .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
        .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+       .gen_echo = ath10k_wmi_op_gen_echo,
 
        .pull_scan = ath10k_wmi_op_pull_scan_ev,
        .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
@@ -7807,6 +7965,7 @@ static const struct wmi_ops wmi_10_2_ops = {
        .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
        .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
        .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+       .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
 
        .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
        .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7862,6 +8021,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
        .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
        .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
        .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+       .gen_echo = ath10k_wmi_op_gen_echo,
 
        .pull_scan = ath10k_wmi_op_pull_scan_ev,
        .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
@@ -7873,6 +8033,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
        .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
        .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
        .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+       .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
 
        .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
        .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7968,7 +8129,7 @@ static const struct wmi_ops wmi_10_4_ops = {
        .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
        .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
        .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
-       .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+       .gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg,
        .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
        .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
        .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
@@ -7980,10 +8141,12 @@ static const struct wmi_ops wmi_10_4_ops = {
        .ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
 
        /* shared with 10.2 */
+       .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
        .gen_request_stats = ath10k_wmi_op_gen_request_stats,
        .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
        .get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
        .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
+       .gen_echo = ath10k_wmi_op_gen_echo,
 };
 
 int ath10k_wmi_attach(struct ath10k *ar)
@@ -8036,6 +8199,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
 
        init_completion(&ar->wmi.service_ready);
        init_completion(&ar->wmi.unified_ready);
+       init_completion(&ar->wmi.barrier);
 
        INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
 
index 3ef4688..48e04b9 100644 (file)
@@ -180,6 +180,7 @@ enum wmi_service {
        WMI_SERVICE_MESH_NON_11S,
        WMI_SERVICE_PEER_STATS,
        WMI_SERVICE_RESTRT_CHNL_SUPPORT,
+       WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
        WMI_SERVICE_TX_MODE_PUSH_ONLY,
        WMI_SERVICE_TX_MODE_PUSH_PULL,
        WMI_SERVICE_TX_MODE_DYNAMIC,
@@ -305,6 +306,7 @@ enum wmi_10_4_service {
        WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
        WMI_10_4_SERVICE_PEER_STATS,
        WMI_10_4_SERVICE_MESH_11S,
+       WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
        WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
        WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
        WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
@@ -402,6 +404,7 @@ static inline char *wmi_service_name(int service_id)
        SVCSTR(WMI_SERVICE_MESH_NON_11S);
        SVCSTR(WMI_SERVICE_PEER_STATS);
        SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT);
+       SVCSTR(WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT);
        SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY);
        SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
        SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
@@ -652,6 +655,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
               WMI_SERVICE_PEER_STATS, len);
        SVCMAP(WMI_10_4_SERVICE_MESH_11S,
               WMI_SERVICE_MESH_11S, len);
+       SVCMAP(WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+              WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, len);
        SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
               WMI_SERVICE_TX_MODE_PUSH_ONLY, len);
        SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
@@ -6169,6 +6174,20 @@ struct wmi_dbglog_cfg_cmd {
        __le32 config_valid;
 } __packed;
 
+struct wmi_10_4_dbglog_cfg_cmd {
+       /* bitmask to hold module id config */
+       __le64 module_enable;
+
+       /* see ATH10K_DBGLOG_CFG_ */
+       __le32 config_enable;
+
+       /* mask of module id bits to be changed */
+       __le64 module_valid;
+
+       /* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
+       __le32 config_valid;
+} __packed;
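Compared with wmi_dbglog_cfg_cmd above it, the 10.4 variant widens module_enable and module_valid to 64 bits, which matches the switch of wmi_10_4_ops to ath10k_wmi_10_4_op_gen_dbglog_cfg earlier in this patch. A standalone sketch of packing such a command in little-endian order, mirroring the field layout above (htole32()/htole64() from glibc's <endian.h> are assumed as stand-ins for the kernel's cpu_to_le helpers):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct dbglog_cfg_10_4_example {
        uint64_t module_enable;         /* __le64 on the wire */
        uint32_t config_enable;         /* __le32 on the wire */
        uint64_t module_valid;
        uint32_t config_valid;
} __attribute__((packed));

int main(void)
{
        struct dbglog_cfg_10_4_example cmd = {
                .module_enable = htole64(1ULL << 40),   /* enable one module */
                .config_enable = htole32(0),
                .module_valid  = htole64(~0ULL),        /* all module bits are valid */
                .config_valid  = htole32(0),
        };

        printf("command payload is %zu bytes\n", sizeof(cmd));
        return 0;
}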
+
 enum wmi_roam_reason {
        WMI_ROAM_REASON_BETTER_AP = 1,
        WMI_ROAM_REASON_BEACON_MISS = 2,
@@ -6296,6 +6315,10 @@ struct wmi_roam_ev_arg {
        __le32 rssi;
 };
 
+struct wmi_echo_ev_arg {
+       __le32 value;
+};
+
 struct wmi_pdev_temperature_event {
        /* temperature value in degrees Celsius */
        __le32 temperature;
@@ -6624,5 +6647,6 @@ void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
                                      char *buf);
 int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
                                   enum wmi_vdev_subtype subtype);
+int ath10k_wmi_barrier(struct ath10k *ar);
 
 #endif /* _WMI_H_ */
index 929d7cc..4f8d9ed 100644 (file)
@@ -909,7 +909,7 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
        struct ath5k_hw *ah = inode->i_private;
        bool res;
        int i, ret;
-       u32 eesize;
+       u32 eesize;     /* NB: in 16-bit words */
        u16 val, *buf;
 
        /* Get eeprom size */
@@ -932,7 +932,7 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
 
        /* Create buffer and read in eeprom */
 
-       buf = vmalloc(eesize);
+       buf = vmalloc(eesize * 2);
        if (!buf) {
                ret = -ENOMEM;
                goto err;
@@ -952,7 +952,7 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
        }
 
        ep->buf = buf;
-       ep->len = i;
+       ep->len = eesize * 2;
 
        file->private_data = (void *)ep;
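The changes above account for the EEPROM size being reported in 16-bit words: both the vmalloc() buffer and the exposed length must be scaled by two to get bytes. A trivial standalone sketch of that conversion (the word count is a made-up example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t eesize_words = 2048;                   /* as reported by the hardware */
        size_t buf_bytes = (size_t)eesize_words * 2;    /* one 16-bit word = two bytes */

        printf("%u words -> allocate and expose %zu bytes\n",
               (unsigned int)eesize_words, buf_bytes);
        return 0;
}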
 
index 72e2ec6..b7fe0af 100644 (file)
@@ -1449,14 +1449,14 @@ static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy,
                return -EIO;
 
        if (test_bit(CONNECTED, &vif->flags)) {
-               ar->tx_pwr = 0;
+               ar->tx_pwr = 255;
 
                if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) {
                        ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
                        return -EIO;
                }
 
-               wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 0,
+               wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 255,
                                                 5 * HZ);
 
                if (signal_pending(current)) {
index 18c0708..d194253 100644 (file)
@@ -64,7 +64,7 @@ int ath6kl_hif_rw_comp_handler(void *context, int status)
 }
 EXPORT_SYMBOL(ath6kl_hif_rw_comp_handler);
 
-#define REG_DUMP_COUNT_AR6003   60
+#define REGISTER_DUMP_COUNT     60
 #define REGISTER_DUMP_LEN_MAX   60
 
 static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
@@ -73,9 +73,6 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
        u32 i, address, regdump_addr = 0;
        int ret;
 
-       if (ar->target_type != TARGET_TYPE_AR6003)
-               return;
-
        /* the reg dump pointer is copied to the host interest area */
        address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
        address = TARG_VTOP(ar->target_type, address);
@@ -95,7 +92,7 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
 
        /* fetch register dump data */
        ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0],
-                                 REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
+                                 REGISTER_DUMP_COUNT * (sizeof(u32)));
        if (ret) {
                ath6kl_warn("failed to get register dump: %d\n", ret);
                return;
@@ -105,9 +102,9 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
        ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version,
                    ar->wiphy->fw_version);
 
-       BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
+       BUILD_BUG_ON(REGISTER_DUMP_COUNT % 4);
 
-       for (i = 0; i < REG_DUMP_COUNT_AR6003; i += 4) {
+       for (i = 0; i < REGISTER_DUMP_COUNT; i += 4) {
                ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
                            i,
                            le32_to_cpu(regdump_val[i]),
index 1b271b9..8eea8d2 100644 (file)
@@ -260,8 +260,8 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
        int cur_bin;
        int upper, lower, cur_vit_mask;
        int i;
-       int8_t mask_m[123];
-       int8_t mask_p[123];
+       int8_t mask_m[123] = {0};
+       int8_t mask_p[123] = {0};
        int8_t mask_amt;
        int tmp_mask;
        static const int pilot_mask_reg[4] = {
@@ -274,9 +274,6 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
        };
        static const int inc[4] = { 0, 100, 0, 0 };
 
-       memset(&mask_m, 0, sizeof(int8_t) * 123);
-       memset(&mask_p, 0, sizeof(int8_t) * 123);
-
        cur_bin = -6000;
        upper = bin + 100;
        lower = bin - 100;
@@ -302,7 +299,7 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
        upper = bin + 120;
        lower = bin - 120;
 
-       for (i = 0; i < 123; i++) {
+       for (i = 0; i < ARRAY_SIZE(mask_m); i++) {
                if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
                        /* workaround for gcc bug #37014 */
                        volatile int tmp_v = abs(cur_vit_mask - bin);
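The hunks above drop the explicit memset() calls in favour of zero initializers and iterate with ARRAY_SIZE() instead of repeating the magic length 123. A standalone sketch of the same idiom (array name and length are illustrative):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
        signed char mask[123] = { 0 };  /* zeroed at definition, no memset() needed */
        size_t i, nonzero = 0;

        for (i = 0; i < ARRAY_SIZE(mask); i++)
                nonzero += mask[i] != 0;

        printf("%zu non-zero entries out of %zu\n", nonzero, ARRAY_SIZE(mask));
        return 0;
}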
index 5bd2cba..08607d7 100644 (file)
@@ -3252,7 +3252,8 @@ static int ar9300_eeprom_restore_flash(struct ath_hw *ah, u8 *mptr,
        int i;
 
        for (i = 0; i < mdata_size / 2; i++, data++)
-               ath9k_hw_nvram_read(ah, i, data);
+               if (!ath9k_hw_nvram_read(ah, i, data))
+                       return -EIO;
 
        return 0;
 }
@@ -3282,7 +3283,8 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
        if (ath9k_hw_use_flash(ah)) {
                u8 txrx;
 
-               ar9300_eeprom_restore_flash(ah, mptr, mdata_size);
+               if (ar9300_eeprom_restore_flash(ah, mptr, mdata_size))
+                       return -EIO;
 
                /* check if eeprom contains valid data */
                eep = (struct ar9300_eeprom *) mptr;
index 490f74d..ddb2886 100644 (file)
@@ -22,7 +22,7 @@
 
 #ifdef CONFIG_MAC80211_LEDS
 
-void ath_fill_led_pin(struct ath_softc *sc)
+static void ath_fill_led_pin(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
 
index d1d0c06..14b13f0 100644 (file)
@@ -2482,6 +2482,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
                return -EINVAL;
        }
 
+       ath9k_gpio_cap_init(ah);
+
        if (AR_SREV_9485(ah) ||
            AR_SREV_9285(ah) ||
            AR_SREV_9330(ah) ||
@@ -2531,8 +2533,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
        else
                pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
 
-       ath9k_gpio_cap_init(ah);
-
        if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
                pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
        else
index a394622..e9f32b5 100644 (file)
@@ -718,9 +718,12 @@ static int ath9k_start(struct ieee80211_hw *hw)
        if (!ath_complete_reset(sc, false))
                ah->reset_power_on = false;
 
-       if (ah->led_pin >= 0)
+       if (ah->led_pin >= 0) {
                ath9k_hw_set_gpio(ah, ah->led_pin,
                                  (ah->config.led_active_high) ? 1 : 0);
+               ath9k_hw_gpio_request_out(ah, ah->led_pin, NULL,
+                                         AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+       }
 
        /*
         * Reset key cache to sane defaults (all entries cleared) instead of
@@ -864,9 +867,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 
        spin_lock_bh(&sc->sc_pcu_lock);
 
-       if (ah->led_pin >= 0)
+       if (ah->led_pin >= 0) {
                ath9k_hw_set_gpio(ah, ah->led_pin,
                                  (ah->config.led_active_high) ? 0 : 1);
+               ath9k_hw_gpio_request_in(ah, ah->led_pin, NULL);
+       }
 
        ath_prepare_reset(sc);
 
@@ -919,7 +924,7 @@ static void ath9k_vif_iter_set_beacon(struct ath9k_vif_iter_data *iter_data,
        } else {
                if (iter_data->primary_beacon_vif->type != NL80211_IFTYPE_AP &&
                    vif->type == NL80211_IFTYPE_AP)
-               iter_data->primary_beacon_vif = vif;
+                       iter_data->primary_beacon_vif = vif;
        }
 
        iter_data->beacons = true;
@@ -1154,6 +1159,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
                bool changed = (iter_data.primary_sta != ctx->primary_sta);
 
                if (iter_data.primary_sta) {
+                       iter_data.primary_beacon_vif = iter_data.primary_sta;
                        iter_data.beacons = true;
                        ath9k_set_assoc_state(sc, iter_data.primary_sta,
                                              changed);
@@ -1563,13 +1569,13 @@ static int ath9k_sta_state(struct ieee80211_hw *hw,
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int ret = 0;
 
-       if (old_state == IEEE80211_STA_AUTH &&
-           new_state == IEEE80211_STA_ASSOC) {
+       if (old_state == IEEE80211_STA_NOTEXIST &&
+           new_state == IEEE80211_STA_NONE) {
                ret = ath9k_sta_add(hw, vif, sta);
                ath_dbg(common, CONFIG,
                        "Add station: %pM\n", sta->addr);
-       } else if (old_state == IEEE80211_STA_ASSOC &&
-                  new_state == IEEE80211_STA_AUTH) {
+       } else if (old_state == IEEE80211_STA_NONE &&
+                  new_state == IEEE80211_STA_NOTEXIST) {
                ret = ath9k_sta_remove(hw, vif, sta);
                ath_dbg(common, CONFIG,
                        "Remove station: %pM\n", sta->addr);
index 8ddd604..52bfbb9 100644 (file)
@@ -50,9 +50,11 @@ static u16 bits_per_symbol[][2] = {
 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid, struct sk_buff *skb);
 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
-                           int tx_flags, struct ath_txq *txq);
+                           int tx_flags, struct ath_txq *txq,
+                           struct ieee80211_sta *sta);
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
+                               struct ieee80211_sta *sta,
                                struct ath_tx_status *ts, int txok);
 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head, bool internal);
@@ -77,6 +79,22 @@ enum {
 /* Aggregation logic */
 /*********************/
 
+static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_sta *sta = info->status.status_driver_data[0];
+
+       if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+               ieee80211_tx_status(hw, skb);
+               return;
+       }
+
+       if (sta)
+               ieee80211_tx_status_noskb(hw, sta, info);
+
+       dev_kfree_skb(skb);
+}
+
 void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
        __acquires(&txq->axq_lock)
 {
@@ -92,6 +110,7 @@ void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
 void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
        __releases(&txq->axq_lock)
 {
+       struct ieee80211_hw *hw = sc->hw;
        struct sk_buff_head q;
        struct sk_buff *skb;
 
@@ -100,7 +119,7 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
        spin_unlock_bh(&txq->axq_lock);
 
        while ((skb = __skb_dequeue(&q)))
-               ieee80211_tx_status(sc->hw, skb);
+               ath_tx_status(hw, skb);
 }
 
 static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
@@ -253,7 +272,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
                }
 
                list_add_tail(&bf->list, &bf_head);
-               ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+               ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
        }
 
        if (sendbar) {
@@ -318,12 +337,12 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
                bf = fi->bf;
 
                if (!bf) {
-                       ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
+                       ath_tx_complete(sc, skb, ATH_TX_ERROR, txq, NULL);
                        continue;
                }
 
                list_add_tail(&bf->list, &bf_head);
-               ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+               ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
        }
 }
 
@@ -426,15 +445,14 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
 
 static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                 struct ath_buf *bf, struct list_head *bf_q,
+                                struct ieee80211_sta *sta,
+                                struct ath_atx_tid *tid,
                                 struct ath_tx_status *ts, int txok)
 {
        struct ath_node *an = NULL;
        struct sk_buff *skb;
-       struct ieee80211_sta *sta;
-       struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
-       struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
        struct list_head bf_head;
        struct sk_buff_head bf_pending;
@@ -460,12 +478,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
        for (i = 0; i < ts->ts_rateindex; i++)
                retries += rates[i].count;
 
-       rcu_read_lock();
-
-       sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
        if (!sta) {
-               rcu_read_unlock();
-
                INIT_LIST_HEAD(&bf_head);
                while (bf) {
                        bf_next = bf->bf_next;
@@ -473,7 +486,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                        if (!bf->bf_state.stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);
 
-                       ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
+                       ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, ts, 0);
 
                        bf = bf_next;
                }
@@ -481,7 +494,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
        }
 
        an = (struct ath_node *)sta->drv_priv;
-       tid = ath_get_skb_tid(sc, an, skb);
        seq_first = tid->seq_start;
        isba = ts->ts_flags & ATH9K_TX_BA;
 
@@ -583,7 +595,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                                                ts);
                        }
 
-                       ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
+                       ath_tx_complete_buf(sc, bf, txq, &bf_head, sta, ts,
                                !txfail);
                } else {
                        if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
@@ -604,7 +616,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                                        ath_tx_update_baw(sc, tid, seqno);
 
                                        ath_tx_complete_buf(sc, bf, txq,
-                                                           &bf_head, ts, 0);
+                                                           &bf_head, NULL, ts,
+                                                           0);
                                        bar_index = max_t(int, bar_index,
                                                ATH_BA_INDEX(seq_first, seqno));
                                        break;
@@ -648,8 +661,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                ath_txq_lock(sc, txq);
        }
 
-       rcu_read_unlock();
-
        if (needreset)
                ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
 }
@@ -664,7 +675,11 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
                                  struct ath_tx_status *ts, struct ath_buf *bf,
                                  struct list_head *bf_head)
 {
+       struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_tx_info *info;
+       struct ieee80211_sta *sta;
+       struct ieee80211_hdr *hdr;
+       struct ath_atx_tid *tid = NULL;
        bool txok, flush;
 
        txok = !(ts->ts_status & ATH9K_TXERR_MASK);
@@ -677,6 +692,16 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
 
        ts->duration = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc,
                                             ts->ts_rateindex);
+
+       hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
+       sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
+       if (sta) {
+               struct ath_node *an = (struct ath_node *)sta->drv_priv;
+               tid = ath_get_skb_tid(sc, an, bf->bf_mpdu);
+               if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
+                       tid->clear_ps_filter = true;
+       }
+
        if (!bf_isampdu(bf)) {
                if (!flush) {
                        info = IEEE80211_SKB_CB(bf->bf_mpdu);
@@ -685,9 +710,9 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
                        ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
                        ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts);
                }
-               ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
+               ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok);
        } else
-               ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
+               ath_tx_complete_aggr(sc, txq, bf, bf_head, sta, tid, ts, txok);
 
        if (!flush)
                ath_txq_schedule(sc, txq);
@@ -923,7 +948,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
                        list_add(&bf->list, &bf_head);
                        __skb_unlink(skb, *q);
                        ath_tx_update_baw(sc, tid, seqno);
-                       ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+                       ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
                        continue;
                }
 
@@ -1832,6 +1857,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
  */
 void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
 {
+       rcu_read_lock();
        ath_txq_lock(sc, txq);
 
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
@@ -1850,6 +1876,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
        ath_drain_txq_list(sc, txq, &txq->axq_q);
 
        ath_txq_unlock_complete(sc, txq);
+       rcu_read_unlock();
 }
 
 bool ath_drain_all_txq(struct ath_softc *sc)
@@ -2472,7 +2499,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 /*****************/
 
 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
-                           int tx_flags, struct ath_txq *txq)
+                           int tx_flags, struct ath_txq *txq,
+                           struct ieee80211_sta *sta)
 {
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -2492,15 +2520,17 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                        tx_info->flags |= IEEE80211_TX_STAT_ACK;
        }
 
-       padpos = ieee80211_hdrlen(hdr->frame_control);
-       padsize = padpos & 3;
-       if (padsize && skb->len>padpos+padsize) {
-               /*
-                * Remove MAC header padding before giving the frame back to
-                * mac80211.
-                */
-               memmove(skb->data + padsize, skb->data, padpos);
-               skb_pull(skb, padsize);
+       if (tx_info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+               padpos = ieee80211_hdrlen(hdr->frame_control);
+               padsize = padpos & 3;
+               if (padsize && skb->len>padpos+padsize) {
+                       /*
+                        * Remove MAC header padding before giving the frame back to
+                        * mac80211.
+                        */
+                       memmove(skb->data + padsize, skb->data, padpos);
+                       skb_pull(skb, padsize);
+               }
        }
 
        spin_lock_irqsave(&sc->sc_pm_lock, flags);
@@ -2515,12 +2545,14 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
        }
        spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
 
-       __skb_queue_tail(&txq->complete_q, skb);
        ath_txq_skb_done(sc, txq, skb);
+       tx_info->status.status_driver_data[0] = sta;
+       __skb_queue_tail(&txq->complete_q, skb);
 }
 
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
+                               struct ieee80211_sta *sta,
                                struct ath_tx_status *ts, int txok)
 {
        struct sk_buff *skb = bf->bf_mpdu;
@@ -2548,7 +2580,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                        complete(&sc->paprd_complete);
        } else {
                ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
-               ath_tx_complete(sc, skb, tx_flags, txq);
+               ath_tx_complete(sc, skb, tx_flags, txq, sta);
        }
 skip_tx_complete:
        /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
@@ -2700,10 +2732,12 @@ void ath_tx_tasklet(struct ath_softc *sc)
        u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
        int i;
 
+       rcu_read_lock();
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
                        ath_tx_processq(sc, &sc->tx.txq[i]);
        }
+       rcu_read_unlock();
 }
 
 void ath_tx_edma_tasklet(struct ath_softc *sc)
@@ -2717,6 +2751,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
        struct list_head *fifo_list;
        int status;
 
+       rcu_read_lock();
        for (;;) {
                if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
                        break;
@@ -2787,6 +2822,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
                ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
                ath_txq_unlock_complete(sc, txq);
        }
+       rcu_read_unlock();
 }
 
 /*****************/
index 76842e6..99ab203 100644 (file)
@@ -670,6 +670,7 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
        ar->readlen = outlen;
        spin_unlock_bh(&ar->cmd_lock);
 
+       reinit_completion(&ar->cmd_wait);
        err = __carl9170_exec_cmd(ar, &ar->cmd, false);
 
        if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) {
@@ -778,10 +779,7 @@ void carl9170_usb_stop(struct ar9170 *ar)
        spin_lock_bh(&ar->cmd_lock);
        ar->readlen = 0;
        spin_unlock_bh(&ar->cmd_lock);
-       complete_all(&ar->cmd_wait);
-
-       /* This is required to prevent an early completion on _start */
-       reinit_completion(&ar->cmd_wait);
+       complete(&ar->cmd_wait);
 
        /*
         * Note:
index 2f8136d..4100ffd 100644 (file)
@@ -338,7 +338,7 @@ static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
        return true;
 }
 
-static struct dfs_pattern_detector default_dpd = {
+static const struct dfs_pattern_detector default_dpd = {
        .exit           = dpd_exit,
        .set_dfs_domain = dpd_set_domain,
        .add_pulse      = dpd_add_pulse,
index f0e1175..d117240 100644 (file)
@@ -354,10 +354,13 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
        wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
                     __func__, wdev, wdev->iftype);
 
+       mutex_lock(&wil->p2p_wdev_mutex);
        if (wil->scan_request) {
                wil_err(wil, "Already scanning\n");
+               mutex_unlock(&wil->p2p_wdev_mutex);
                return -EAGAIN;
        }
+       mutex_unlock(&wil->p2p_wdev_mutex);
 
        /* check we are client side */
        switch (wdev->iftype) {
@@ -760,14 +763,11 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
        return rc;
 }
 
-static struct wil_tid_crypto_rx_single *
-wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
-                   enum wmi_key_usage key_usage, const u8 *mac_addr)
+static struct wil_sta_info *
+wil_find_sta_by_key_usage(struct wil6210_priv *wil,
+                         enum wmi_key_usage key_usage, const u8 *mac_addr)
 {
        int cid = -EINVAL;
-       int tid = 0;
-       struct wil_sta_info *s;
-       struct wil_tid_crypto_rx *c;
 
        if (key_usage == WMI_KEY_USE_TX_GROUP)
                return NULL; /* not needed */
@@ -778,18 +778,72 @@ wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
        else if (key_usage == WMI_KEY_USE_RX_GROUP)
                cid = wil_find_cid_by_idx(wil, 0);
        if (cid < 0) {
-               wil_err(wil, "No CID for %pM %s[%d]\n", mac_addr,
-                       key_usage_str[key_usage], key_index);
+               wil_err(wil, "No CID for %pM %s\n", mac_addr,
+                       key_usage_str[key_usage]);
                return ERR_PTR(cid);
        }
 
-       s = &wil->sta[cid];
-       if (key_usage == WMI_KEY_USE_PAIRWISE)
-               c = &s->tid_crypto_rx[tid];
-       else
-               c = &s->group_crypto_rx;
+       return &wil->sta[cid];
+}
+
+static void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage,
+                             struct wil_sta_info *cs,
+                             struct key_params *params)
+{
+       struct wil_tid_crypto_rx_single *cc;
+       int tid;
+
+       if (!cs)
+               return;
 
-       return &c->key_id[key_index];
+       switch (key_usage) {
+       case WMI_KEY_USE_PAIRWISE:
+               for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
+                       cc = &cs->tid_crypto_rx[tid].key_id[key_index];
+                       if (params->seq)
+                               memcpy(cc->pn, params->seq,
+                                      IEEE80211_GCMP_PN_LEN);
+                       else
+                               memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
+                       cc->key_set = true;
+               }
+               break;
+       case WMI_KEY_USE_RX_GROUP:
+               cc = &cs->group_crypto_rx.key_id[key_index];
+               if (params->seq)
+                       memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
+               else
+                       memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
+               cc->key_set = true;
+               break;
+       default:
+               break;
+       }
+}
+
+static void wil_del_rx_key(u8 key_index, enum wmi_key_usage key_usage,
+                          struct wil_sta_info *cs)
+{
+       struct wil_tid_crypto_rx_single *cc;
+       int tid;
+
+       if (!cs)
+               return;
+
+       switch (key_usage) {
+       case WMI_KEY_USE_PAIRWISE:
+               for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
+                       cc = &cs->tid_crypto_rx[tid].key_id[key_index];
+                       cc->key_set = false;
+               }
+               break;
+       case WMI_KEY_USE_RX_GROUP:
+               cc = &cs->group_crypto_rx.key_id[key_index];
+               cc->key_set = false;
+               break;
+       default:
+               break;
+       }
 }
 
 static int wil_cfg80211_add_key(struct wiphy *wiphy,
@@ -801,24 +855,26 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
        int rc;
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
        enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
-       struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
-                                                                 key_index,
-                                                                 key_usage,
-                                                                 mac_addr);
+       struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage,
+                                                           mac_addr);
+
+       if (!params) {
+               wil_err(wil, "NULL params\n");
+               return -EINVAL;
+       }
 
        wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__,
                     mac_addr, key_usage_str[key_usage], key_index,
                     params->seq_len, params->seq);
 
-       if (IS_ERR(cc)) {
+       if (IS_ERR(cs)) {
                wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n",
                        __func__, mac_addr, key_usage_str[key_usage], key_index,
                        params->seq_len, params->seq);
                return -EINVAL;
        }
 
-       if (cc)
-               cc->key_set = false;
+       wil_del_rx_key(key_index, key_usage, cs);
 
        if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
                wil_err(wil,
@@ -831,13 +887,8 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
 
        rc = wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
                                params->key, key_usage);
-       if ((rc == 0) && cc) {
-               if (params->seq)
-                       memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
-               else
-                       memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
-               cc->key_set = true;
-       }
+       if (!rc)
+               wil_set_crypto_rx(key_index, key_usage, cs, params);
 
        return rc;
 }
@@ -849,20 +900,18 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
        enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
-       struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
-                                                                 key_index,
-                                                                 key_usage,
-                                                                 mac_addr);
+       struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage,
+                                                           mac_addr);
 
        wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr,
                     key_usage_str[key_usage], key_index);
 
-       if (IS_ERR(cc))
+       if (IS_ERR(cs))
                wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__,
                         mac_addr, key_usage_str[key_usage], key_index);
 
-       if (!IS_ERR_OR_NULL(cc))
-               cc->key_set = false;
+       if (!IS_ERR_OR_NULL(cs))
+               wil_del_rx_key(key_index, key_usage, cs);
 
        return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
 }
@@ -1363,23 +1412,16 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
                                         struct wireless_dev *wdev)
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-       u8 started;
+       struct wil_p2p_info *p2p = &wil->p2p;
+
+       if (!p2p->p2p_dev_started)
+               return;
 
        wil_dbg_misc(wil, "%s: entered\n", __func__);
        mutex_lock(&wil->mutex);
-       started = wil_p2p_stop_discovery(wil);
-       if (started && wil->scan_request) {
-               struct cfg80211_scan_info info = {
-                       .aborted = true,
-               };
-
-               cfg80211_scan_done(wil->scan_request, &info);
-               wil->scan_request = NULL;
-               wil->radio_wdev = wil->wdev;
-       }
+       wil_p2p_stop_radio_operations(wil);
+       p2p->p2p_dev_started = 0;
        mutex_unlock(&wil->mutex);
-
-       wil->p2p.p2p_dev_started = 0;
 }
 
 static struct cfg80211_ops wil_cfg80211_ops = {
@@ -1464,14 +1506,8 @@ struct wireless_dev *wil_cfg80211_init(struct device *dev)
        set_wiphy_dev(wdev->wiphy, dev);
        wil_wiphy_init(wdev->wiphy);
 
-       rc = wiphy_register(wdev->wiphy);
-       if (rc < 0)
-               goto out_failed_reg;
-
        return wdev;
 
-out_failed_reg:
-       wiphy_free(wdev->wiphy);
 out:
        kfree(wdev);
 
@@ -1487,7 +1523,6 @@ void wil_wdev_free(struct wil6210_priv *wil)
        if (!wdev)
                return;
 
-       wiphy_unregister(wdev->wiphy);
        wiphy_free(wdev->wiphy);
        kfree(wdev);
 }
@@ -1498,11 +1533,11 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil)
 
        mutex_lock(&wil->p2p_wdev_mutex);
        p2p_wdev = wil->p2p_wdev;
+       wil->p2p_wdev = NULL;
+       wil->radio_wdev = wil_to_wdev(wil);
+       mutex_unlock(&wil->p2p_wdev_mutex);
        if (p2p_wdev) {
-               wil->p2p_wdev = NULL;
-               wil->radio_wdev = wil_to_wdev(wil);
                cfg80211_unregister_wdev(p2p_wdev);
                kfree(p2p_wdev);
        }
-       mutex_unlock(&wil->p2p_wdev_mutex);
 }
index a8098b4..5e4058a 100644 (file)
@@ -1553,6 +1553,56 @@ static const struct file_operations fops_led_blink_time = {
        .open  = simple_open,
 };
 
+/*---------FW capabilities------------*/
+static int wil_fw_capabilities_debugfs_show(struct seq_file *s, void *data)
+{
+       struct wil6210_priv *wil = s->private;
+
+       seq_printf(s, "fw_capabilities : %*pb\n", WMI_FW_CAPABILITY_MAX,
+                  wil->fw_capabilities);
+
+       return 0;
+}
+
+static int wil_fw_capabilities_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wil_fw_capabilities_debugfs_show,
+                          inode->i_private);
+}
+
+static const struct file_operations fops_fw_capabilities = {
+       .open           = wil_fw_capabilities_seq_open,
+       .release        = single_release,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+
+/*---------FW version------------*/
+static int wil_fw_version_debugfs_show(struct seq_file *s, void *data)
+{
+       struct wil6210_priv *wil = s->private;
+
+       if (wil->fw_version[0])
+               seq_printf(s, "%s\n", wil->fw_version);
+       else
+               seq_puts(s, "N/A\n");
+
+       return 0;
+}
+
+static int wil_fw_version_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wil_fw_version_debugfs_show,
+                          inode->i_private);
+}
+
+static const struct file_operations fops_fw_version = {
+       .open           = wil_fw_version_seq_open,
+       .release        = single_release,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+
 /*----------------*/
 static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
                                       struct dentry *dbg)
@@ -1603,6 +1653,8 @@ static const struct {
        {"recovery",    S_IRUGO | S_IWUSR,      &fops_recovery},
        {"led_cfg",     S_IRUGO | S_IWUSR,      &fops_led_cfg},
        {"led_blink_time",      S_IRUGO | S_IWUSR,      &fops_led_blink_time},
+       {"fw_capabilities",     S_IRUGO,        &fops_fw_capabilities},
+       {"fw_version",  S_IRUGO,                &fops_fw_version},
 };
 
 static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1643,7 +1695,6 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
 static const struct dbg_off dbg_wil_off[] = {
        WIL_FIELD(privacy,      S_IRUGO,                doff_u32),
        WIL_FIELD(status[0],    S_IRUGO | S_IWUSR,      doff_ulong),
-       WIL_FIELD(fw_version,   S_IRUGO,                doff_u32),
        WIL_FIELD(hw_version,   S_IRUGO,                doff_x32),
        WIL_FIELD(recovery_count, S_IRUGO,              doff_u32),
        WIL_FIELD(ap_isolate,   S_IRUGO,                doff_u32),
index 7a2c6c1..2f2b910 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -58,6 +58,15 @@ struct wil_fw_record_comment { /* type == wil_fw_type_comment */
        u8 data[0]; /* free-form data [data_size], see above */
 } __packed;
 
+/* FW capabilities encoded inside a comment record */
+#define WIL_FW_CAPABILITIES_MAGIC (0xabcddcba)
+struct wil_fw_record_capabilities { /* type == wil_fw_type_comment */
+       /* identifies capabilities record */
+       __le32 magic;
+       /* capabilities (variable size), see enum wmi_fw_capability */
+       u8 capabilities[0];
+};
+
 /* perform action
  * data_size = @head.size - offsetof(struct wil_fw_record_action, data)
  */
@@ -93,6 +102,9 @@ struct wil_fw_record_verify { /* type == wil_fw_verify */
 /* file header
  * First record of every file
  */
+/* the FW version prefix in the comment */
+#define WIL_FW_VERSION_PREFIX "FW version: "
+#define WIL_FW_VERSION_PREFIX_LEN (sizeof(WIL_FW_VERSION_PREFIX) - 1)
 struct wil_fw_record_file_header {
        __le32 signature ; /* Wilocity signature */
        __le32 reserved;
index d30657e..8f40eb3 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -118,6 +118,12 @@ static int wil_fw_verify(struct wil6210_priv *wil, const u8 *data, size_t size)
        return (int)dlen;
 }
 
+static int fw_ignore_section(struct wil6210_priv *wil, const void *data,
+                            size_t size)
+{
+       return 0;
+}
+
 static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
                             size_t size)
 {
@@ -126,6 +132,27 @@ static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
        return 0;
 }
 
+static int
+fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
+                      size_t size)
+{
+       const struct wil_fw_record_capabilities *rec = data;
+       size_t capa_size;
+
+       if (size < sizeof(*rec) ||
+           le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC)
+               return 0;
+
+       capa_size = size - offsetof(struct wil_fw_record_capabilities,
+                                   capabilities);
+       bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
+       memcpy(wil->fw_capabilities, rec->capabilities,
+              min(sizeof(wil->fw_capabilities), capa_size));
+       wil_hex_dump_fw("CAPA", DUMP_PREFIX_OFFSET, 16, 1,
+                       rec->capabilities, capa_size, false);
+       return 0;
+}
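fw_handle_capabilities() only trusts comment records that carry WIL_FW_CAPABILITIES_MAGIC and then copies at most min(host bitmap size, record payload size) bytes, so a short or oversized record cannot overflow wil->fw_capabilities. A standalone sketch of that bounded copy (buffer sizes and contents are invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        uint8_t host_caps[4] = { 0 };           /* driver-side capability bitmap */
        uint8_t record[8] = { 0x03, 0x80 };     /* capability payload from the file */
        size_t n = MIN(sizeof(host_caps), sizeof(record));

        memcpy(host_caps, record, n);           /* never runs past either buffer */
        printf("copied %zu bytes, first byte 0x%02x\n", n, host_caps[0]);
        return 0;
}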
+
 static int fw_handle_data(struct wil6210_priv *wil, const void *data,
                          size_t size)
 {
@@ -196,6 +223,13 @@ static int fw_handle_file_header(struct wil6210_priv *wil, const void *data,
        wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, d->comment,
                        sizeof(d->comment), true);
 
+       if (!memcmp(d->comment, WIL_FW_VERSION_PREFIX,
+                   WIL_FW_VERSION_PREFIX_LEN))
+               memcpy(wil->fw_version,
+                      d->comment + WIL_FW_VERSION_PREFIX_LEN,
+                      min(sizeof(d->comment) - WIL_FW_VERSION_PREFIX_LEN,
+                          sizeof(wil->fw_version) - 1));
+
        return 0;
 }
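The file-header handler now also recognizes a comment that starts with WIL_FW_VERSION_PREFIX and copies the remainder into wil->fw_version, bounded by both the comment field and the destination buffer. A standalone sketch of that prefix match and bounded copy (field sizes and the version string are examples only):

#include <stdio.h>
#include <string.h>

#define PREFIX          "FW version: "
#define PREFIX_LEN      (sizeof(PREFIX) - 1)
#define MIN(a, b)       ((a) < (b) ? (a) : (b))

int main(void)
{
        char comment[32] = PREFIX "5.2.0.18";   /* fixed-size field, tail is zeroed */
        char version[16] = { 0 };

        if (!memcmp(comment, PREFIX, PREFIX_LEN))
                memcpy(version, comment + PREFIX_LEN,
                       MIN(sizeof(comment) - PREFIX_LEN, sizeof(version) - 1));

        printf("extracted version: %s\n", version);
        return 0;
}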
 
@@ -383,42 +417,51 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
 
 static const struct {
        int type;
-       int (*handler)(struct wil6210_priv *wil, const void *data, size_t size);
+       int (*load_handler)(struct wil6210_priv *wil, const void *data,
+                           size_t size);
+       int (*parse_handler)(struct wil6210_priv *wil, const void *data,
+                            size_t size);
 } wil_fw_handlers[] = {
-       {wil_fw_type_comment, fw_handle_comment},
-       {wil_fw_type_data, fw_handle_data},
-       {wil_fw_type_fill, fw_handle_fill},
+       {wil_fw_type_comment, fw_handle_comment, fw_handle_capabilities},
+       {wil_fw_type_data, fw_handle_data, fw_ignore_section},
+       {wil_fw_type_fill, fw_handle_fill, fw_ignore_section},
        /* wil_fw_type_action */
        /* wil_fw_type_verify */
-       {wil_fw_type_file_header, fw_handle_file_header},
-       {wil_fw_type_direct_write, fw_handle_direct_write},
-       {wil_fw_type_gateway_data, fw_handle_gateway_data},
-       {wil_fw_type_gateway_data4, fw_handle_gateway_data4},
+       {wil_fw_type_file_header, fw_handle_file_header,
+               fw_handle_file_header},
+       {wil_fw_type_direct_write, fw_handle_direct_write, fw_ignore_section},
+       {wil_fw_type_gateway_data, fw_handle_gateway_data, fw_ignore_section},
+       {wil_fw_type_gateway_data4, fw_handle_gateway_data4,
+               fw_ignore_section},
 };
 
 static int wil_fw_handle_record(struct wil6210_priv *wil, int type,
-                               const void *data, size_t size)
+                               const void *data, size_t size, bool load)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++) {
+       for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++)
                if (wil_fw_handlers[i].type == type)
-                       return wil_fw_handlers[i].handler(wil, data, size);
-       }
+                       return load ?
+                               wil_fw_handlers[i].load_handler(
+                                       wil, data, size) :
+                               wil_fw_handlers[i].parse_handler(
+                                       wil, data, size);
 
        wil_err_fw(wil, "unknown record type: %d\n", type);
        return -EINVAL;
 }
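Splitting each wil_fw_handlers[] entry into a load_handler and a parse_handler lets the same record walker either program the device or merely inspect the image, selected by the new load flag. A standalone sketch of such a two-column dispatch table (record types, handler names and payloads are made up):

#include <stdbool.h>
#include <stdio.h>

typedef int (*rec_handler)(const void *data, unsigned int size);

static int load_data(const void *data, unsigned int size)
{
        printf("loading %u bytes to the device\n", size);
        return 0;
}

static int log_comment(const void *data, unsigned int size)
{
        printf("comment record, %u bytes\n", size);
        return 0;
}

static int parse_comment(const void *data, unsigned int size)
{
        printf("parsing %u byte comment for capabilities\n", size);
        return 0;
}

static int ignore_section(const void *data, unsigned int size)
{
        return 0;       /* nothing to do when only parsing */
}

static const struct {
        int type;
        rec_handler load;
        rec_handler parse;
} handlers[] = {
        { 1, log_comment, parse_comment },      /* comment-style record */
        { 2, load_data, ignore_section },       /* data-style record */
};

static int handle_record(int type, const void *data, unsigned int size, bool load)
{
        for (size_t i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
                if (handlers[i].type == type)
                        return load ? handlers[i].load(data, size)
                                    : handlers[i].parse(data, size);
        return -1;                              /* unknown record type */
}

int main(void)
{
        char payload[16] = { 0 };

        handle_record(1, payload, sizeof(payload), false);      /* parse only */
        handle_record(2, payload, sizeof(payload), true);       /* real load */
        return 0;
}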
 
 /**
- * wil_fw_load - load FW into device
- *
- * Load the FW and uCode code and data to the corresponding device
- * memory regions
+ * wil_fw_process - process sections from the FW file
+ * If load is true, load the FW and uCode code and data to the
+ * corresponding device memory regions; otherwise, only parse
+ * and look for capabilities.
  *
  * Return error code
  */
-static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
+static int wil_fw_process(struct wil6210_priv *wil, const void *data,
+                         size_t size, bool load)
 {
        int rc = 0;
        const struct wil_fw_record_head *hdr;
@@ -437,7 +480,7 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
                        return -EINVAL;
                }
                rc = wil_fw_handle_record(wil, le16_to_cpu(hdr->type),
-                                         &hdr[1], hdr_sz);
+                                         &hdr[1], hdr_sz, load);
                if (rc)
                        return rc;
        }
@@ -456,13 +499,16 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
 }
 
 /**
- * wil_request_firmware - Request firmware and load to device
+ * wil_request_firmware - Request firmware
  *
- * Request firmware image from the file and load it to device
+ * Request the firmware image from the file.
+ * If load is true, load the firmware to the device; otherwise,
+ * only parse it and extract the capabilities.
  *
  * Return error code
  */
-int wil_request_firmware(struct wil6210_priv *wil, const char *name)
+int wil_request_firmware(struct wil6210_priv *wil, const char *name,
+                        bool load)
 {
        int rc, rc1;
        const struct firmware *fw;
@@ -482,7 +528,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name)
                        rc = rc1;
                        goto out;
                }
-               rc = wil_fw_load(wil, d, rc1);
+               rc = wil_fw_process(wil, d, rc1, load);
                if (rc < 0)
                        goto out;
        }
index 011e741..64046e0 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -101,7 +101,7 @@ static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
              mask_halp ? WIL6210_IRQ_DISABLE : WIL6210_IRQ_DISABLE_NO_HALP);
 }
 
-static void wil6210_mask_halp(struct wil6210_priv *wil)
+void wil6210_mask_halp(struct wil6210_priv *wil)
 {
        wil_dbg_irq(wil, "%s()\n", __func__);
 
@@ -503,6 +503,13 @@ static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
                                offsetof(struct RGF_ICR, ICR));
                u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
                                     offsetof(struct RGF_ICR, IMV));
+
+               /* HALP interrupt can be unmasked when misc interrupts are
+                * masked
+                */
+               if (icr_misc & BIT_DMA_EP_MISC_ICR_HALP)
+                       return 0;
+
                wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
                                "Rx   icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
                                "Tx   icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
@@ -592,7 +599,7 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
 
 void wil6210_set_halp(struct wil6210_priv *wil)
 {
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_irq(wil, "%s()\n", __func__);
 
        wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS),
              BIT_DMA_EP_MISC_ICR_HALP);
@@ -600,7 +607,7 @@ void wil6210_set_halp(struct wil6210_priv *wil)
 
 void wil6210_clear_halp(struct wil6210_priv *wil)
 {
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_irq(wil, "%s()\n", __func__);
 
        wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR),
              BIT_DMA_EP_MISC_ICR_HALP);
index 4bc92e5..e7130b5 100644 (file)
@@ -232,6 +232,9 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
        struct net_device *ndev = wil_to_ndev(wil);
        struct wireless_dev *wdev = wil->wdev;
 
+       if (unlikely(!ndev))
+               return;
+
        might_sleep();
        wil_info(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid,
                 reason_code, from_event ? "+" : "-");
@@ -849,6 +852,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
        bitmap_zero(wil->status, wil_status_last);
        mutex_unlock(&wil->wmi_mutex);
 
+       mutex_lock(&wil->p2p_wdev_mutex);
        if (wil->scan_request) {
                struct cfg80211_scan_info info = {
                        .aborted = true,
@@ -860,6 +864,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
                cfg80211_scan_done(wil->scan_request, &info);
                wil->scan_request = NULL;
        }
+       mutex_unlock(&wil->p2p_wdev_mutex);
 
        wil_mask_irq(wil);
 
@@ -888,11 +893,12 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
                         WIL_FW2_NAME);
 
                wil_halt_cpu(wil);
+               memset(wil->fw_version, 0, sizeof(wil->fw_version));
                /* Loading f/w from the file */
-               rc = wil_request_firmware(wil, WIL_FW_NAME);
+               rc = wil_request_firmware(wil, WIL_FW_NAME, true);
                if (rc)
                        return rc;
-               rc = wil_request_firmware(wil, WIL_FW2_NAME);
+               rc = wil_request_firmware(wil, WIL_FW2_NAME, true);
                if (rc)
                        return rc;
 
@@ -1035,10 +1041,10 @@ int wil_up(struct wil6210_priv *wil)
 
 int __wil_down(struct wil6210_priv *wil)
 {
-       int rc;
-
        WARN_ON(!mutex_is_locked(&wil->mutex));
 
+       set_bit(wil_status_resetting, wil->status);
+
        if (wil->platform_ops.bus_request)
                wil->platform_ops.bus_request(wil->platform_handle, 0);
 
@@ -1050,8 +1056,9 @@ int __wil_down(struct wil6210_priv *wil)
        }
        wil_enable_irq(wil);
 
-       (void)wil_p2p_stop_discovery(wil);
+       wil_p2p_stop_radio_operations(wil);
 
+       mutex_lock(&wil->p2p_wdev_mutex);
        if (wil->scan_request) {
                struct cfg80211_scan_info info = {
                        .aborted = true,
@@ -1063,18 +1070,7 @@ int __wil_down(struct wil6210_priv *wil)
                cfg80211_scan_done(wil->scan_request, &info);
                wil->scan_request = NULL;
        }
-
-       if (test_bit(wil_status_fwconnected, wil->status) ||
-           test_bit(wil_status_fwconnecting, wil->status)) {
-
-               mutex_unlock(&wil->mutex);
-               rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
-                             WMI_DISCONNECT_EVENTID, NULL, 0,
-                             WIL6210_DISCONNECT_TO_MS);
-               mutex_lock(&wil->mutex);
-               if (rc)
-                       wil_err(wil, "timeout waiting for disconnect\n");
-       }
+       mutex_unlock(&wil->p2p_wdev_mutex);
 
        wil_reset(wil, false);
 
@@ -1118,23 +1114,26 @@ void wil_halp_vote(struct wil6210_priv *wil)
 
        mutex_lock(&wil->halp.lock);
 
-       wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
-                    wil->halp.ref_cnt);
+       wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+                   wil->halp.ref_cnt);
 
        if (++wil->halp.ref_cnt == 1) {
                wil6210_set_halp(wil);
                rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
-               if (!rc)
+               if (!rc) {
                        wil_err(wil, "%s: HALP vote timed out\n", __func__);
-               else
-                       wil_dbg_misc(wil,
-                                    "%s: HALP vote completed after %d ms\n",
-                                    __func__,
-                                    jiffies_to_msecs(to_jiffies - rc));
+                       /* Mask HALP as done in case the interrupt is raised */
+                       wil6210_mask_halp(wil);
+               } else {
+                       wil_dbg_irq(wil,
+                                   "%s: HALP vote completed after %d ms\n",
+                                   __func__,
+                                   jiffies_to_msecs(to_jiffies - rc));
+               }
        }
 
-       wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
-                    wil->halp.ref_cnt);
+       wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+                   wil->halp.ref_cnt);
 
        mutex_unlock(&wil->halp.lock);
 }
@@ -1145,16 +1144,16 @@ void wil_halp_unvote(struct wil6210_priv *wil)
 
        mutex_lock(&wil->halp.lock);
 
-       wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
-                    wil->halp.ref_cnt);
+       wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+                   wil->halp.ref_cnt);
 
        if (--wil->halp.ref_cnt == 0) {
                wil6210_clear_halp(wil);
-               wil_dbg_misc(wil, "%s: HALP unvote\n", __func__);
+               wil_dbg_irq(wil, "%s: HALP unvote\n", __func__);
        }
 
-       wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
-                    wil->halp.ref_cnt);
+       wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+                   wil->halp.ref_cnt);
 
        mutex_unlock(&wil->halp.lock);
 }
index 0984097..61de5e9 100644 (file)
@@ -179,13 +179,6 @@ void *wil_if_alloc(struct device *dev)
        SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
        wdev->netdev = ndev;
 
-       netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
-                      WIL6210_NAPI_BUDGET);
-       netif_tx_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
-                      WIL6210_NAPI_BUDGET);
-
-       netif_tx_stop_all_queues(ndev);
-
        return wil;
 
  out_priv:
@@ -216,25 +209,48 @@ void wil_if_free(struct wil6210_priv *wil)
 
 int wil_if_add(struct wil6210_priv *wil)
 {
+       struct wireless_dev *wdev = wil_to_wdev(wil);
+       struct wiphy *wiphy = wdev->wiphy;
        struct net_device *ndev = wil_to_ndev(wil);
        int rc;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "entered");
+
+       strlcpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
+
+       rc = wiphy_register(wiphy);
+       if (rc < 0) {
+               wil_err(wil, "failed to register wiphy, err %d\n", rc);
+               return rc;
+       }
+
+       netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
+                      WIL6210_NAPI_BUDGET);
+       netif_tx_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
+                         WIL6210_NAPI_BUDGET);
+
+       netif_tx_stop_all_queues(ndev);
 
        rc = register_netdev(ndev);
        if (rc < 0) {
                dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
-               return rc;
+               goto out_wiphy;
        }
 
        return 0;
+
+out_wiphy:
+       wiphy_unregister(wdev->wiphy);
+       return rc;
 }
 
 void wil_if_remove(struct wil6210_priv *wil)
 {
        struct net_device *ndev = wil_to_ndev(wil);
+       struct wireless_dev *wdev = wil_to_wdev(wil);
 
        wil_dbg_misc(wil, "%s()\n", __func__);
 
        unregister_netdev(ndev);
+       wiphy_unregister(wdev->wiphy);
 }
index e0f8aa0..4087785 100644 (file)
@@ -263,3 +263,49 @@ void wil_p2p_search_expired(struct work_struct *work)
                mutex_unlock(&wil->p2p_wdev_mutex);
        }
 }
+
+void wil_p2p_stop_radio_operations(struct wil6210_priv *wil)
+{
+       struct wil_p2p_info *p2p = &wil->p2p;
+       struct cfg80211_scan_info info = {
+               .aborted = true,
+       };
+
+       lockdep_assert_held(&wil->mutex);
+
+       mutex_lock(&wil->p2p_wdev_mutex);
+
+       if (wil->radio_wdev != wil->p2p_wdev)
+               goto out;
+
+       if (!p2p->discovery_started) {
+               /* Regular scan on the p2p device */
+               if (wil->scan_request &&
+                   wil->scan_request->wdev == wil->p2p_wdev) {
+                       cfg80211_scan_done(wil->scan_request, &info);
+                       wil->scan_request = NULL;
+               }
+               goto out;
+       }
+
+       /* Search or listen on p2p device */
+       mutex_unlock(&wil->p2p_wdev_mutex);
+       wil_p2p_stop_discovery(wil);
+       mutex_lock(&wil->p2p_wdev_mutex);
+
+       if (wil->scan_request) {
+               /* search */
+               cfg80211_scan_done(wil->scan_request, &info);
+               wil->scan_request = NULL;
+       } else {
+               /* listen */
+               cfg80211_remain_on_channel_expired(wil->radio_wdev,
+                                                  p2p->cookie,
+                                                  &p2p->listen_chan,
+                                                  GFP_KERNEL);
+       }
+
+out:
+       wil->radio_wdev = wil->wdev;
+       mutex_unlock(&wil->p2p_wdev_mutex);
+}
index 7b5c422..44746ca 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/interrupt.h>
 #include <linux/suspend.h>
 #include "wil6210.h"
+#include <linux/rtnetlink.h>
 
 static bool use_msi = true;
 module_param(use_msi, bool, S_IRUGO);
@@ -38,6 +39,7 @@ void wil_set_capabilities(struct wil6210_priv *wil)
        u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
 
        bitmap_zero(wil->hw_capabilities, hw_capability_last);
+       bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
 
        switch (rev_id) {
        case JTAG_DEV_ID_SPARROW_B0:
@@ -51,6 +53,9 @@ void wil_set_capabilities(struct wil6210_priv *wil)
        }
 
        wil_info(wil, "Board hardware is %s\n", wil->hw_name);
+
+       /* extract FW capabilities from file without loading the FW */
+       wil_request_firmware(wil, WIL_FW_NAME, false);
 }
 
 void wil_disable_irq(struct wil6210_priv *wil)
@@ -293,6 +298,9 @@ static void wil_pcie_remove(struct pci_dev *pdev)
 #endif /* CONFIG_PM */
 
        wil6210_debugfs_remove(wil);
+       rtnl_lock();
+       wil_p2p_wdev_free(wil);
+       rtnl_unlock();
        wil_if_remove(wil);
        wil_if_pcie_disable(wil);
        pci_iounmap(pdev, csr);
@@ -300,7 +308,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
        pci_disable_device(pdev);
        if (wil->platform_ops.uninit)
                wil->platform_ops.uninit(wil->platform_handle);
-       wil_p2p_wdev_free(wil);
        wil_if_free(wil);
 }
 
index f2f6a40..4c38520 100644 (file)
@@ -873,9 +873,12 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
                rc = -EINVAL;
                goto out_free;
        }
-       vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
 
+       spin_lock_bh(&txdata->lock);
+       vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
        txdata->enabled = 1;
+       spin_unlock_bh(&txdata->lock);
+
        if (txdata->dot1x_open && (agg_wsize >= 0))
                wil_addba_tx_request(wil, id, agg_wsize);
 
@@ -950,9 +953,11 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
                rc = -EINVAL;
                goto out_free;
        }
-       vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
 
+       spin_lock_bh(&txdata->lock);
+       vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
        txdata->enabled = 1;
+       spin_unlock_bh(&txdata->lock);
 
        return 0;
  out_free:
index ecab4af..a949cd6 100644 (file)
@@ -17,6 +17,7 @@
 #ifndef __WIL6210_H__
 #define __WIL6210_H__
 
+#include <linux/etherdevice.h>
 #include <linux/netdevice.h>
 #include <linux/wireless.h>
 #include <net/cfg80211.h>
@@ -576,10 +577,11 @@ struct wil6210_priv {
        struct wireless_dev *wdev;
        void __iomem *csr;
        DECLARE_BITMAP(status, wil_status_last);
-       u32 fw_version;
+       u8 fw_version[ETHTOOL_FWVERS_LEN];
        u32 hw_version;
        const char *hw_name;
        DECLARE_BITMAP(hw_capabilities, hw_capability_last);
+       DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
        u8 n_mids; /* number of additional MIDs as reported by FW */
        u32 recovery_count; /* num of FW recovery attempts in a short time */
        u32 recovery_state; /* FW recovery state machine */
@@ -657,7 +659,7 @@ struct wil6210_priv {
 
        /* P2P_DEVICE vif */
        struct wireless_dev *p2p_wdev;
-       struct mutex p2p_wdev_mutex; /* protect @p2p_wdev */
+       struct mutex p2p_wdev_mutex; /* protect @p2p_wdev and @scan_request */
        struct wireless_dev *radio_wdev;
 
        /* High Access Latency Policy voting */
@@ -828,6 +830,7 @@ void wil_unmask_irq(struct wil6210_priv *wil);
 void wil_configure_interrupt_moderation(struct wil6210_priv *wil);
 void wil_disable_irq(struct wil6210_priv *wil);
 void wil_enable_irq(struct wil6210_priv *wil);
+void wil6210_mask_halp(struct wil6210_priv *wil);
 
 /* P2P */
 bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request);
@@ -840,6 +843,7 @@ u8 wil_p2p_stop_discovery(struct wil6210_priv *wil);
 int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie);
 void wil_p2p_listen_expired(struct work_struct *work);
 void wil_p2p_search_expired(struct work_struct *work);
+void wil_p2p_stop_radio_operations(struct wil6210_priv *wil);
 
 /* WMI for P2P */
 int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi);
@@ -893,7 +897,8 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
 int wil_iftype_nl2wmi(enum nl80211_iftype type);
 
 int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
-int wil_request_firmware(struct wil6210_priv *wil, const char *name);
+int wil_request_firmware(struct wil6210_priv *wil, const char *name,
+                        bool load);
 
 int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
 int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
index 4d92541..fae4f12 100644 (file)
@@ -312,14 +312,14 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
        struct wireless_dev *wdev = wil->wdev;
        struct wmi_ready_event *evt = d;
 
-       wil->fw_version = le32_to_cpu(evt->sw_version);
        wil->n_mids = evt->numof_additional_mids;
 
-       wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
+       wil_info(wil, "FW ver. %s(SW %d); MAC %pM; %d MID's\n",
+                wil->fw_version, le32_to_cpu(evt->sw_version),
                 evt->mac, wil->n_mids);
        /* ignore MAC address, we already have it from the boot loader */
-       snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
-                "%d", wil->fw_version);
+       strlcpy(wdev->wiphy->fw_version, wil->fw_version,
+               sizeof(wdev->wiphy->fw_version));
 
        wil_set_recovery_state(wil, fw_recovery_idle);
        set_bit(wil_status_fwready, wil->status);
@@ -424,6 +424,7 @@ static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
 static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
                                  void *d, int len)
 {
+       mutex_lock(&wil->p2p_wdev_mutex);
        if (wil->scan_request) {
                struct wmi_scan_complete_event *data = d;
                struct cfg80211_scan_info info = {
@@ -435,14 +436,13 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
                             wil->scan_request, info.aborted);
 
                del_timer_sync(&wil->scan_timer);
-               mutex_lock(&wil->p2p_wdev_mutex);
                cfg80211_scan_done(wil->scan_request, &info);
                wil->radio_wdev = wil->wdev;
-               mutex_unlock(&wil->p2p_wdev_mutex);
                wil->scan_request = NULL;
        } else {
                wil_err(wil, "SCAN_COMPLETE while not scanning\n");
        }
+       mutex_unlock(&wil->p2p_wdev_mutex);
 }
 
 static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
index 685fe0d..f430e8a 100644 (file)
@@ -46,6 +46,16 @@ enum wmi_mid {
        MID_BROADCAST           = 0xFF,
 };
 
+/* FW capability IDs
+ * Each ID maps to a bit in a 32-bit bitmask value provided by the FW to
+ * the host
+ */
+enum wmi_fw_capability {
+       WMI_FW_CAPABILITY_FTM           = 0,
+       WMI_FW_CAPABILITY_PS_CONFIG     = 1,
+       WMI_FW_CAPABILITY_MAX,
+};
+
 /* WMI_CMD_HDR */
 struct wmi_cmd_hdr {
        u8 mid;
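
The wmi_fw_capability IDs above are bit positions rather than values; the driver keeps them in the fw_capabilities bitmap added later in this patch (DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX)), so they can be tested with the standard kernel bitmap helpers. A minimal sketch, assuming a populated struct wil6210_priv *wil:

	/* gate an FTM-dependent path on the FW capability bit */
	if (test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
		wil_dbg_misc(wil, "FW supports FTM\n");
	else
		wil_dbg_misc(wil, "FW does not support FTM\n");
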
@@ -120,6 +130,8 @@ enum wmi_command_id {
        WMI_BF_SM_MGMT_CMDID                    = 0x838,
        WMI_BF_RXSS_MGMT_CMDID                  = 0x839,
        WMI_BF_TRIG_CMDID                       = 0x83A,
+       WMI_LINK_MAINTAIN_CFG_WRITE_CMDID       = 0x842,
+       WMI_LINK_MAINTAIN_CFG_READ_CMDID        = 0x843,
        WMI_SET_SECTORS_CMDID                   = 0x849,
        WMI_MAINTAIN_PAUSE_CMDID                = 0x850,
        WMI_MAINTAIN_RESUME_CMDID               = 0x851,
@@ -134,10 +146,15 @@ enum wmi_command_id {
        WMI_BF_CTRL_CMDID                       = 0x862,
        WMI_NOTIFY_REQ_CMDID                    = 0x863,
        WMI_GET_STATUS_CMDID                    = 0x864,
+       WMI_GET_RF_STATUS_CMDID                 = 0x866,
+       WMI_GET_BASEBAND_TYPE_CMDID             = 0x867,
        WMI_UNIT_TEST_CMDID                     = 0x900,
        WMI_HICCUP_CMDID                        = 0x901,
        WMI_FLASH_READ_CMDID                    = 0x902,
        WMI_FLASH_WRITE_CMDID                   = 0x903,
+       /* Power management */
+       WMI_TRAFFIC_DEFERRAL_CMDID              = 0x904,
+       WMI_TRAFFIC_RESUME_CMDID                = 0x905,
        /* P2P */
        WMI_P2P_CFG_CMDID                       = 0x910,
        WMI_PORT_ALLOCATE_CMDID                 = 0x911,
@@ -150,6 +167,26 @@ enum wmi_command_id {
        WMI_PCP_START_CMDID                     = 0x918,
        WMI_PCP_STOP_CMDID                      = 0x919,
        WMI_GET_PCP_FACTOR_CMDID                = 0x91B,
+       /* Power Save Configuration Commands */
+       WMI_PS_DEV_PROFILE_CFG_CMDID            = 0x91C,
+       /* Not supported yet */
+       WMI_PS_DEV_CFG_CMDID                    = 0x91D,
+       /* Not supported yet */
+       WMI_PS_DEV_CFG_READ_CMDID               = 0x91E,
+       /* Per MAC Power Save Configuration commands
+        * Not supported yet
+        */
+       WMI_PS_MID_CFG_CMDID                    = 0x91F,
+       /* Not supported yet */
+       WMI_PS_MID_CFG_READ_CMDID               = 0x920,
+       WMI_RS_CFG_CMDID                        = 0x921,
+       WMI_GET_DETAILED_RS_RES_CMDID           = 0x922,
+       WMI_AOA_MEAS_CMDID                      = 0x923,
+       WMI_TOF_SESSION_START_CMDID             = 0x991,
+       WMI_TOF_GET_CAPABILITIES_CMDID          = 0x992,
+       WMI_TOF_SET_LCR_CMDID                   = 0x993,
+       WMI_TOF_SET_LCI_CMDID                   = 0x994,
+       WMI_TOF_CHANNEL_INFO_CMDID              = 0x995,
        WMI_SET_MAC_ADDRESS_CMDID               = 0xF003,
        WMI_ABORT_SCAN_CMDID                    = 0xF007,
        WMI_SET_PROMISCUOUS_MODE_CMDID          = 0xF041,
@@ -291,9 +328,8 @@ enum wmi_scan_type {
 /* WMI_START_SCAN_CMDID */
 struct wmi_start_scan_cmd {
        u8 direct_scan_mac_addr[WMI_MAC_LEN];
-       /* DMG Beacon frame is transmitted during active scanning */
+       /* run scan with discovery beacon. Relevant for ACTIVE scan only. */
        u8 discovery_mode;
-       /* reserved */
        u8 reserved;
        /* Max duration in the home channel(ms) */
        __le32 dwell_time;
@@ -453,6 +489,12 @@ struct wmi_port_delete_cmd {
        u8 reserved[3];
 } __packed;
 
+/* WMI_TRAFFIC_DEFERRAL_CMDID */
+struct wmi_traffic_deferral_cmd {
+       /* Bit vector: bit[0] - wake on Unicast, bit[1] - wake on Broadcast */
+       u8 wakeup_trigger;
+} __packed;
+
 /* WMI_P2P_CFG_CMDID */
 enum wmi_discovery_mode {
        WMI_DISCOVERY_MODE_NON_OFFLOAD  = 0x00,
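
Per the comment in wmi_traffic_deferral_cmd, wakeup_trigger is a bit vector with bit 0 selecting wake on unicast and bit 1 wake on broadcast. A hedged sketch of filling the command for both triggers (how the command is subsequently sent is outside this hunk):

	struct wmi_traffic_deferral_cmd cmd = {
		/* wake on unicast (bit 0) and broadcast (bit 1) traffic */
		.wakeup_trigger = BIT(0) | BIT(1),
	};
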
@@ -818,85 +860,193 @@ struct wmi_pmc_cmd {
        __le64 mem_base;
 } __packed;
 
+enum wmi_aoa_meas_type {
+       WMI_AOA_PHASE_MEAS      = 0x00,
+       WMI_AOA_PHASE_AMP_MEAS  = 0x01,
+};
+
+/* WMI_AOA_MEAS_CMDID */
+struct wmi_aoa_meas_cmd {
+       u8 mac_addr[WMI_MAC_LEN];
+       /* channels IDs:
+        * 0 - 58320 MHz
+        * 1 - 60480 MHz
+        * 2 - 62640 MHz
+        */
+       u8 channel;
+       /* enum wmi_aoa_meas_type */
+       u8 aoa_meas_type;
+       __le32 meas_rf_mask;
+} __packed;
+
+enum wmi_tof_burst_duration {
+       WMI_TOF_BURST_DURATION_250_USEC         = 2,
+       WMI_TOF_BURST_DURATION_500_USEC         = 3,
+       WMI_TOF_BURST_DURATION_1_MSEC           = 4,
+       WMI_TOF_BURST_DURATION_2_MSEC           = 5,
+       WMI_TOF_BURST_DURATION_4_MSEC           = 6,
+       WMI_TOF_BURST_DURATION_8_MSEC           = 7,
+       WMI_TOF_BURST_DURATION_16_MSEC          = 8,
+       WMI_TOF_BURST_DURATION_32_MSEC          = 9,
+       WMI_TOF_BURST_DURATION_64_MSEC          = 10,
+       WMI_TOF_BURST_DURATION_128_MSEC         = 11,
+       WMI_TOF_BURST_DURATION_NO_PREFERENCES   = 15,
+};
+
+enum wmi_tof_session_start_flags {
+       WMI_TOF_SESSION_START_FLAG_SECURED      = 0x1,
+       WMI_TOF_SESSION_START_FLAG_ASAP         = 0x2,
+       WMI_TOF_SESSION_START_FLAG_LCI_REQ      = 0x4,
+       WMI_TOF_SESSION_START_FLAG_LCR_REQ      = 0x8,
+};
+
+/* WMI_TOF_SESSION_START_CMDID */
+struct wmi_ftm_dest_info {
+       u8 channel;
+       /* wmi_tof_session_start_flags_e */
+       u8 flags;
+       u8 initial_token;
+       u8 num_of_ftm_per_burst;
+       u8 num_of_bursts_exp;
+       /* wmi_tof_burst_duration_e */
+       u8 burst_duration;
+       /* Burst period indicates the interval between two consecutive
+        * burst instances, in units of 100 ms
+        */
+       __le16 burst_period;
+       u8 dst_mac[WMI_MAC_LEN];
+       __le16 reserved;
+} __packed;
+
+/* WMI_TOF_SESSION_START_CMDID */
+struct wmi_tof_session_start_cmd {
+       __le32 session_id;
+       u8 num_of_aoa_measures;
+       u8 aoa_type;
+       __le16 num_of_dest;
+       u8 reserved[4];
+       struct wmi_ftm_dest_info ftm_dest_info[0];
+} __packed;
+
+enum wmi_tof_channel_info_report_type {
+       WMI_TOF_CHANNEL_INFO_TYPE_CIR                   = 0x1,
+       WMI_TOF_CHANNEL_INFO_TYPE_RSSI                  = 0x2,
+       WMI_TOF_CHANNEL_INFO_TYPE_SNR                   = 0x4,
+       WMI_TOF_CHANNEL_INFO_TYPE_DEBUG_DATA            = 0x8,
+       WMI_TOF_CHANNEL_INFO_TYPE_VENDOR_SPECIFIC       = 0x10,
+};
+
+/* WMI_TOF_CHANNEL_INFO_CMDID */
+struct wmi_tof_channel_info_cmd {
+       /* wmi_tof_channel_info_report_type_e */
+       __le32 channel_info_report_request;
+} __packed;
+
 /* WMI Events
  * List of Events (target to host)
  */
 enum wmi_event_id {
-       WMI_READY_EVENTID                       = 0x1001,
-       WMI_CONNECT_EVENTID                     = 0x1002,
-       WMI_DISCONNECT_EVENTID                  = 0x1003,
-       WMI_SCAN_COMPLETE_EVENTID               = 0x100A,
-       WMI_REPORT_STATISTICS_EVENTID           = 0x100B,
-       WMI_RD_MEM_RSP_EVENTID                  = 0x1800,
-       WMI_FW_READY_EVENTID                    = 0x1801,
-       WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID      = 0x200,
-       WMI_ECHO_RSP_EVENTID                    = 0x1803,
-       WMI_FS_TUNE_DONE_EVENTID                = 0x180A,
-       WMI_CORR_MEASURE_EVENTID                = 0x180B,
-       WMI_READ_RSSI_EVENTID                   = 0x180C,
-       WMI_TEMP_SENSE_DONE_EVENTID             = 0x180E,
-       WMI_DC_CALIB_DONE_EVENTID               = 0x180F,
-       WMI_IQ_TX_CALIB_DONE_EVENTID            = 0x1811,
-       WMI_IQ_RX_CALIB_DONE_EVENTID            = 0x1812,
-       WMI_SET_WORK_MODE_DONE_EVENTID          = 0x1815,
-       WMI_LO_LEAKAGE_CALIB_DONE_EVENTID       = 0x1816,
-       WMI_MARLON_R_READ_DONE_EVENTID          = 0x1818,
-       WMI_MARLON_R_WRITE_DONE_EVENTID         = 0x1819,
-       WMI_MARLON_R_TXRX_SEL_DONE_EVENTID      = 0x181A,
-       WMI_SILENT_RSSI_CALIB_DONE_EVENTID      = 0x181D,
-       WMI_RF_RX_TEST_DONE_EVENTID             = 0x181E,
-       WMI_CFG_RX_CHAIN_DONE_EVENTID           = 0x1820,
-       WMI_VRING_CFG_DONE_EVENTID              = 0x1821,
-       WMI_BA_STATUS_EVENTID                   = 0x1823,
-       WMI_RCP_ADDBA_REQ_EVENTID               = 0x1824,
-       WMI_RCP_ADDBA_RESP_SENT_EVENTID         = 0x1825,
-       WMI_DELBA_EVENTID                       = 0x1826,
-       WMI_GET_SSID_EVENTID                    = 0x1828,
-       WMI_GET_PCP_CHANNEL_EVENTID             = 0x182A,
-       WMI_SW_TX_COMPLETE_EVENTID              = 0x182B,
-       WMI_READ_MAC_RXQ_EVENTID                = 0x1830,
-       WMI_READ_MAC_TXQ_EVENTID                = 0x1831,
-       WMI_WRITE_MAC_RXQ_EVENTID               = 0x1832,
-       WMI_WRITE_MAC_TXQ_EVENTID               = 0x1833,
-       WMI_WRITE_MAC_XQ_FIELD_EVENTID          = 0x1834,
-       WMI_BEAMFORMING_MGMT_DONE_EVENTID       = 0x1836,
-       WMI_BF_TXSS_MGMT_DONE_EVENTID           = 0x1837,
-       WMI_BF_RXSS_MGMT_DONE_EVENTID           = 0x1839,
-       WMI_RS_MGMT_DONE_EVENTID                = 0x1852,
-       WMI_RF_MGMT_STATUS_EVENTID              = 0x1853,
-       WMI_THERMAL_THROTTLING_STATUS_EVENTID   = 0x1855,
-       WMI_BF_SM_MGMT_DONE_EVENTID             = 0x1838,
-       WMI_RX_MGMT_PACKET_EVENTID              = 0x1840,
-       WMI_TX_MGMT_PACKET_EVENTID              = 0x1841,
-       WMI_OTP_READ_RESULT_EVENTID             = 0x1856,
-       WMI_LED_CFG_DONE_EVENTID                = 0x1858,
+       WMI_READY_EVENTID                               = 0x1001,
+       WMI_CONNECT_EVENTID                             = 0x1002,
+       WMI_DISCONNECT_EVENTID                          = 0x1003,
+       WMI_SCAN_COMPLETE_EVENTID                       = 0x100A,
+       WMI_REPORT_STATISTICS_EVENTID                   = 0x100B,
+       WMI_RD_MEM_RSP_EVENTID                          = 0x1800,
+       WMI_FW_READY_EVENTID                            = 0x1801,
+       WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID              = 0x200,
+       WMI_ECHO_RSP_EVENTID                            = 0x1803,
+       WMI_FS_TUNE_DONE_EVENTID                        = 0x180A,
+       WMI_CORR_MEASURE_EVENTID                        = 0x180B,
+       WMI_READ_RSSI_EVENTID                           = 0x180C,
+       WMI_TEMP_SENSE_DONE_EVENTID                     = 0x180E,
+       WMI_DC_CALIB_DONE_EVENTID                       = 0x180F,
+       WMI_IQ_TX_CALIB_DONE_EVENTID                    = 0x1811,
+       WMI_IQ_RX_CALIB_DONE_EVENTID                    = 0x1812,
+       WMI_SET_WORK_MODE_DONE_EVENTID                  = 0x1815,
+       WMI_LO_LEAKAGE_CALIB_DONE_EVENTID               = 0x1816,
+       WMI_MARLON_R_READ_DONE_EVENTID                  = 0x1818,
+       WMI_MARLON_R_WRITE_DONE_EVENTID                 = 0x1819,
+       WMI_MARLON_R_TXRX_SEL_DONE_EVENTID              = 0x181A,
+       WMI_SILENT_RSSI_CALIB_DONE_EVENTID              = 0x181D,
+       WMI_RF_RX_TEST_DONE_EVENTID                     = 0x181E,
+       WMI_CFG_RX_CHAIN_DONE_EVENTID                   = 0x1820,
+       WMI_VRING_CFG_DONE_EVENTID                      = 0x1821,
+       WMI_BA_STATUS_EVENTID                           = 0x1823,
+       WMI_RCP_ADDBA_REQ_EVENTID                       = 0x1824,
+       WMI_RCP_ADDBA_RESP_SENT_EVENTID                 = 0x1825,
+       WMI_DELBA_EVENTID                               = 0x1826,
+       WMI_GET_SSID_EVENTID                            = 0x1828,
+       WMI_GET_PCP_CHANNEL_EVENTID                     = 0x182A,
+       WMI_SW_TX_COMPLETE_EVENTID                      = 0x182B,
+       WMI_READ_MAC_RXQ_EVENTID                        = 0x1830,
+       WMI_READ_MAC_TXQ_EVENTID                        = 0x1831,
+       WMI_WRITE_MAC_RXQ_EVENTID                       = 0x1832,
+       WMI_WRITE_MAC_TXQ_EVENTID                       = 0x1833,
+       WMI_WRITE_MAC_XQ_FIELD_EVENTID                  = 0x1834,
+       WMI_BEAMFORMING_MGMT_DONE_EVENTID               = 0x1836,
+       WMI_BF_TXSS_MGMT_DONE_EVENTID                   = 0x1837,
+       WMI_BF_RXSS_MGMT_DONE_EVENTID                   = 0x1839,
+       WMI_RS_MGMT_DONE_EVENTID                        = 0x1852,
+       WMI_RF_MGMT_STATUS_EVENTID                      = 0x1853,
+       WMI_THERMAL_THROTTLING_STATUS_EVENTID           = 0x1855,
+       WMI_BF_SM_MGMT_DONE_EVENTID                     = 0x1838,
+       WMI_RX_MGMT_PACKET_EVENTID                      = 0x1840,
+       WMI_TX_MGMT_PACKET_EVENTID                      = 0x1841,
+       WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID        = 0x1842,
+       WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID         = 0x1843,
+       WMI_OTP_READ_RESULT_EVENTID                     = 0x1856,
+       WMI_LED_CFG_DONE_EVENTID                        = 0x1858,
        /* Performance monitoring events */
-       WMI_DATA_PORT_OPEN_EVENTID              = 0x1860,
-       WMI_WBE_LINK_DOWN_EVENTID               = 0x1861,
-       WMI_BF_CTRL_DONE_EVENTID                = 0x1862,
-       WMI_NOTIFY_REQ_DONE_EVENTID             = 0x1863,
-       WMI_GET_STATUS_DONE_EVENTID             = 0x1864,
-       WMI_VRING_EN_EVENTID                    = 0x1865,
-       WMI_UNIT_TEST_EVENTID                   = 0x1900,
-       WMI_FLASH_READ_DONE_EVENTID             = 0x1902,
-       WMI_FLASH_WRITE_DONE_EVENTID            = 0x1903,
+       WMI_DATA_PORT_OPEN_EVENTID                      = 0x1860,
+       WMI_WBE_LINK_DOWN_EVENTID                       = 0x1861,
+       WMI_BF_CTRL_DONE_EVENTID                        = 0x1862,
+       WMI_NOTIFY_REQ_DONE_EVENTID                     = 0x1863,
+       WMI_GET_STATUS_DONE_EVENTID                     = 0x1864,
+       WMI_VRING_EN_EVENTID                            = 0x1865,
+       WMI_GET_RF_STATUS_EVENTID                       = 0x1866,
+       WMI_GET_BASEBAND_TYPE_EVENTID                   = 0x1867,
+       WMI_UNIT_TEST_EVENTID                           = 0x1900,
+       WMI_FLASH_READ_DONE_EVENTID                     = 0x1902,
+       WMI_FLASH_WRITE_DONE_EVENTID                    = 0x1903,
+       /* Power management */
+       WMI_TRAFFIC_DEFERRAL_EVENTID                    = 0x1904,
+       WMI_TRAFFIC_RESUME_EVENTID                      = 0x1905,
        /* P2P */
-       WMI_P2P_CFG_DONE_EVENTID                = 0x1910,
-       WMI_PORT_ALLOCATED_EVENTID              = 0x1911,
-       WMI_PORT_DELETED_EVENTID                = 0x1912,
-       WMI_LISTEN_STARTED_EVENTID              = 0x1914,
-       WMI_SEARCH_STARTED_EVENTID              = 0x1915,
-       WMI_DISCOVERY_STARTED_EVENTID           = 0x1916,
-       WMI_DISCOVERY_STOPPED_EVENTID           = 0x1917,
-       WMI_PCP_STARTED_EVENTID                 = 0x1918,
-       WMI_PCP_STOPPED_EVENTID                 = 0x1919,
-       WMI_PCP_FACTOR_EVENTID                  = 0x191A,
-       WMI_SET_CHANNEL_EVENTID                 = 0x9000,
-       WMI_ASSOC_REQ_EVENTID                   = 0x9001,
-       WMI_EAPOL_RX_EVENTID                    = 0x9002,
-       WMI_MAC_ADDR_RESP_EVENTID               = 0x9003,
-       WMI_FW_VER_EVENTID                      = 0x9004,
-       WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID   = 0x9005,
+       WMI_P2P_CFG_DONE_EVENTID                        = 0x1910,
+       WMI_PORT_ALLOCATED_EVENTID                      = 0x1911,
+       WMI_PORT_DELETED_EVENTID                        = 0x1912,
+       WMI_LISTEN_STARTED_EVENTID                      = 0x1914,
+       WMI_SEARCH_STARTED_EVENTID                      = 0x1915,
+       WMI_DISCOVERY_STARTED_EVENTID                   = 0x1916,
+       WMI_DISCOVERY_STOPPED_EVENTID                   = 0x1917,
+       WMI_PCP_STARTED_EVENTID                         = 0x1918,
+       WMI_PCP_STOPPED_EVENTID                         = 0x1919,
+       WMI_PCP_FACTOR_EVENTID                          = 0x191A,
+       /* Power Save Configuration Events */
+       WMI_PS_DEV_PROFILE_CFG_EVENTID                  = 0x191C,
+       /* Not supported yet */
+       WMI_PS_DEV_CFG_EVENTID                          = 0x191D,
+       /* Not supported yet */
+       WMI_PS_DEV_CFG_READ_EVENTID                     = 0x191E,
+       /* Not supported yet */
+       WMI_PS_MID_CFG_EVENTID                          = 0x191F,
+       /* Not supported yet */
+       WMI_PS_MID_CFG_READ_EVENTID                     = 0x1920,
+       WMI_RS_CFG_DONE_EVENTID                         = 0x1921,
+       WMI_GET_DETAILED_RS_RES_EVENTID                 = 0x1922,
+       WMI_AOA_MEAS_EVENTID                            = 0x1923,
+       WMI_TOF_SESSION_END_EVENTID                     = 0x1991,
+       WMI_TOF_GET_CAPABILITIES_EVENTID                = 0x1992,
+       WMI_TOF_SET_LCR_EVENTID                         = 0x1993,
+       WMI_TOF_SET_LCI_EVENTID                         = 0x1994,
+       WMI_TOF_FTM_PER_DEST_RES_EVENTID                = 0x1995,
+       WMI_TOF_CHANNEL_INFO_EVENTID                    = 0x1996,
+       WMI_SET_CHANNEL_EVENTID                         = 0x9000,
+       WMI_ASSOC_REQ_EVENTID                           = 0x9001,
+       WMI_EAPOL_RX_EVENTID                            = 0x9002,
+       WMI_MAC_ADDR_RESP_EVENTID                       = 0x9003,
+       WMI_FW_VER_EVENTID                              = 0x9004,
+       WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID           = 0x9005,
 };
 
 /* Events data structures */
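
One detail from the FTM structures above worth keeping in mind: burst_period (and responder_burst_period in the result event below) is expressed in units of 100 ms. A small illustrative helper for the conversion; the helper name is an assumption, not part of the patch:

	/* burst_period is in units of 100 ms (see wmi_ftm_dest_info) */
	static inline __le16 ftm_burst_period_from_msec(u32 msec)
	{
		return cpu_to_le16(msec / 100);
	}
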
@@ -943,10 +1093,85 @@ struct wmi_get_status_done_event {
 
 /* WMI_FW_VER_EVENTID */
 struct wmi_fw_ver_event {
-       u8 major;
-       u8 minor;
-       __le16 subminor;
-       __le16 build;
+       /* FW image version */
+       __le32 fw_major;
+       __le32 fw_minor;
+       __le32 fw_subminor;
+       __le32 fw_build;
+       /* FW image build time stamp */
+       __le32 hour;
+       __le32 minute;
+       __le32 second;
+       __le32 day;
+       __le32 month;
+       __le32 year;
+       /* Boot Loader image version */
+       __le32 bl_major;
+       __le32 bl_minor;
+       __le32 bl_subminor;
+       __le32 bl_build;
+       /* The number of entries in the FW capabilities array */
+       u8 fw_capabilities_len;
+       u8 reserved[3];
+       /* FW capabilities info
+        * Must be the last member of the struct
+        */
+       __le32 fw_capabilities[0];
+} __packed;
+
+/* WMI_GET_RF_STATUS_EVENTID */
+enum rf_type {
+       RF_UNKNOWN      = 0x00,
+       RF_MARLON       = 0x01,
+       RF_SPARROW      = 0x02,
+};
+
+/* WMI_GET_RF_STATUS_EVENTID */
+enum board_file_rf_type {
+       BF_RF_MARLON    = 0x00,
+       BF_RF_SPARROW   = 0x01,
+};
+
+/* WMI_GET_RF_STATUS_EVENTID */
+enum rf_status {
+       RF_OK                   = 0x00,
+       RF_NO_COMM              = 0x01,
+       RF_WRONG_BOARD_FILE     = 0x02,
+};
+
+/* WMI_GET_RF_STATUS_EVENTID */
+struct wmi_get_rf_status_event {
+       /* enum rf_type */
+       __le32 rf_type;
+       /* attached RFs bit vector */
+       __le32 attached_rf_vector;
+       /* enabled RFs bit vector */
+       __le32 enabled_rf_vector;
+       /* enum rf_status, refers to enabled RFs */
+       u8 rf_status[32];
+       /* enum board file RF type */
+       __le32 board_file_rf_type;
+       /* board file platform type */
+       __le32 board_file_platform_type;
+       /* board file version */
+       __le32 board_file_version;
+       __le32 reserved[2];
+} __packed;
+
+/* WMI_GET_BASEBAND_TYPE_EVENTID */
+enum baseband_type {
+       BASEBAND_UNKNOWN        = 0x00,
+       BASEBAND_SPARROW_M_A0   = 0x03,
+       BASEBAND_SPARROW_M_A1   = 0x04,
+       BASEBAND_SPARROW_M_B0   = 0x05,
+       BASEBAND_SPARROW_M_C0   = 0x06,
+       BASEBAND_SPARROW_M_D0   = 0x07,
+};
+
+/* WMI_GET_BASEBAND_TYPE_EVENTID */
+struct wmi_get_baseband_type_event {
+       /* enum baseband_type */
+       __le32 baseband_type;
 } __packed;
 
 /* WMI_MAC_ADDR_RESP_EVENTID */
@@ -1410,4 +1635,553 @@ struct wmi_led_cfg_done_event {
        __le32 status;
 } __packed;
 
+#define WMI_NUM_MCS    (13)
+
+/* Rate search parameters configuration per connection */
+struct wmi_rs_cfg {
+       /* The maximal allowed PER for each MCS
+        * MCS will be considered failed if PER during RS is higher
+        */
+       u8 per_threshold[WMI_NUM_MCS];
+       /* Number of MPDUs for each MCS
+        * this is the minimal statistic required to make an educated
+        * decision
+        */
+       u8 min_frame_cnt[WMI_NUM_MCS];
+       /* stop threshold [0-100] */
+       u8 stop_th;
+       /* MCS1 stop threshold [0-100] */
+       u8 mcs1_fail_th;
+       u8 max_back_failure_th;
+       /* Debug feature for disabling internal RS trigger (which is
+        * currently triggered by BF Done)
+        */
+       u8 dbg_disable_internal_trigger;
+       __le32 back_failure_mask;
+       __le32 mcs_en_vec;
+} __packed;
+
+/* WMI_RS_CFG_CMDID */
+struct wmi_rs_cfg_cmd {
+       /* connection id */
+       u8 cid;
+       /* enable or disable rate search */
+       u8 rs_enable;
+       /* rate search configuration */
+       struct wmi_rs_cfg rs_cfg;
+} __packed;
+
+/* WMI_RS_CFG_DONE_EVENTID */
+struct wmi_rs_cfg_done_event {
+       u8 cid;
+       /* enum wmi_fw_status */
+       u8 status;
+       u8 reserved[2];
+} __packed;
+
+/* WMI_GET_DETAILED_RS_RES_CMDID */
+struct wmi_get_detailed_rs_res_cmd {
+       /* connection id */
+       u8 cid;
+       u8 reserved[3];
+} __packed;
+
+/* RS results status */
+enum wmi_rs_results_status {
+       WMI_RS_RES_VALID        = 0x00,
+       WMI_RS_RES_INVALID      = 0x01,
+};
+
+/* Rate search results */
+struct wmi_rs_results {
+       /* number of sent MPDUs */
+       u8 num_of_tx_pkt[WMI_NUM_MCS];
+       /* number of non-acked MPDUs */
+       u8 num_of_non_acked_pkt[WMI_NUM_MCS];
+       /* RS timestamp */
+       __le32 tsf;
+       /* RS selected MCS */
+       u8 mcs;
+} __packed;
+
+/* WMI_GET_DETAILED_RS_RES_EVENTID */
+struct wmi_get_detailed_rs_res_event {
+       u8 cid;
+       /* enum wmi_rs_results_status */
+       u8 status;
+       /* detailed rs results */
+       struct wmi_rs_results rs_results;
+       u8 reserved[3];
+} __packed;
+
+/* broadcast connection ID */
+#define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST    (0xFFFFFFFF)
+
+/* Types wmi_link_maintain_cfg presets for WMI_LINK_MAINTAIN_CFG_WRITE_CMD */
+enum wmi_link_maintain_cfg_type {
+       /* AP/PCP default normal (non-FST) configuration settings */
+       WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_AP    = 0x00,
+       /* AP/PCP default FST configuration settings */
+       WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_AP       = 0x01,
+       /* STA default normal (non-FST) configuration settings */
+       WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_STA   = 0x02,
+       /* STA default FST configuration settings */
+       WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_STA      = 0x03,
+       /* custom configuration settings */
+       WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM               = 0x04,
+       /* number of defined configuration types */
+       WMI_LINK_MAINTAIN_CFG_TYPES_NUM                 = 0x05,
+};
+
+/* Response status codes for WMI_LINK_MAINTAIN_CFG_WRITE/READ commands */
+enum wmi_link_maintain_cfg_response_status {
+       /* WMI_LINK_MAINTAIN_CFG_WRITE/READ command successfully accomplished
+        */
+       WMI_LINK_MAINTAIN_CFG_RESPONSE_STATUS_OK                = 0x00,
+       /* ERROR due to bad argument in WMI_LINK_MAINTAIN_CFG_WRITE/READ
+        * command request
+        */
+       WMI_LINK_MAINTAIN_CFG_RESPONSE_STATUS_BAD_ARGUMENT      = 0x01,
+};
+
+/* Link Loss and Keep Alive configuration */
+struct wmi_link_maintain_cfg {
+       /* link_loss_enable_detectors_vec */
+       __le32 link_loss_enable_detectors_vec;
+       /* detectors check period usec */
+       __le32 check_link_loss_period_usec;
+       /* max allowed tx ageing */
+       __le32 tx_ageing_threshold_usec;
+       /* keep alive period for high SNR */
+       __le32 keep_alive_period_usec_high_snr;
+       /* keep alive period for low SNR */
+       __le32 keep_alive_period_usec_low_snr;
+       /* lower snr limit for keep alive period update */
+       __le32 keep_alive_snr_threshold_low_db;
+       /* upper snr limit for keep alive period update */
+       __le32 keep_alive_snr_threshold_high_db;
+       /* num of successive bad beacons causing link-loss */
+       __le32 bad_beacons_num_threshold;
+       /* SNR limit for bad_beacons_detector */
+       __le32 bad_beacons_snr_threshold_db;
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_WRITE_CMDID */
+struct wmi_link_maintain_cfg_write_cmd {
+       /* enum wmi_link_maintain_cfg_type_e - type of requested default
+        * configuration to be applied
+        */
+       __le32 cfg_type;
+       /* requested connection ID or WMI_LINK_MAINTAIN_CFG_CID_BROADCAST */
+       __le32 cid;
+       /* custom configuration settings to be applied (relevant only if
+        * cfg_type==WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM)
+        */
+       struct wmi_link_maintain_cfg lm_cfg;
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_READ_CMDID */
+struct wmi_link_maintain_cfg_read_cmd {
+       /* connection ID which configuration settings are requested */
+       __le32 cid;
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID */
+struct wmi_link_maintain_cfg_write_done_event {
+       /* requested connection ID */
+       __le32 cid;
+       /* wmi_link_maintain_cfg_response_status_e - write status */
+       __le32 status;
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID */
+struct wmi_link_maintain_cfg_read_done_event {
+       /* requested connection ID */
+       __le32 cid;
+       /* wmi_link_maintain_cfg_response_status_e - read status */
+       __le32 status;
+       /* Retrieved configuration settings */
+       struct wmi_link_maintain_cfg lm_cfg;
+} __packed;
+
+enum wmi_traffic_deferral_status {
+       WMI_TRAFFIC_DEFERRAL_APPROVED   = 0x0,
+       WMI_TRAFFIC_DEFERRAL_REJECTED   = 0x1,
+};
+
+/* WMI_TRAFFIC_DEFERRAL_EVENTID */
+struct wmi_traffic_deferral_event {
+       /* enum wmi_traffic_deferral_status_e */
+       u8 status;
+} __packed;
+
+enum wmi_traffic_resume_status {
+       WMI_TRAFFIC_RESUME_SUCCESS      = 0x0,
+       WMI_TRAFFIC_RESUME_FAILED       = 0x1,
+};
+
+/* WMI_TRAFFIC_RESUME_EVENTID */
+struct wmi_traffic_resume_event {
+       /* enum wmi_traffic_resume_status_e */
+       u8 status;
+} __packed;
+
+/* Power Save command completion status codes */
+enum wmi_ps_cfg_cmd_status {
+       WMI_PS_CFG_CMD_STATUS_SUCCESS   = 0x00,
+       WMI_PS_CFG_CMD_STATUS_BAD_PARAM = 0x01,
+       /* other error */
+       WMI_PS_CFG_CMD_STATUS_ERROR     = 0x02,
+};
+
+/* Device Power Save Profiles */
+enum wmi_ps_profile_type {
+       WMI_PS_PROFILE_TYPE_DEFAULT             = 0x00,
+       WMI_PS_PROFILE_TYPE_PS_DISABLED         = 0x01,
+       WMI_PS_PROFILE_TYPE_MAX_PS              = 0x02,
+       WMI_PS_PROFILE_TYPE_LOW_LATENCY_PS      = 0x03,
+};
+
+/* WMI_PS_DEV_PROFILE_CFG_CMDID
+ *
+ * Power save profile to be used by the device
+ *
+ * Returned event:
+ * - WMI_PS_DEV_PROFILE_CFG_EVENTID
+ */
+struct wmi_ps_dev_profile_cfg_cmd {
+       /* wmi_ps_profile_type_e */
+       u8 ps_profile;
+       u8 reserved[3];
+} __packed;
+
+/* WMI_PS_DEV_PROFILE_CFG_EVENTID */
+struct wmi_ps_dev_profile_cfg_event {
+       /* wmi_ps_cfg_cmd_status_e */
+       __le32 status;
+} __packed;
+
+enum wmi_ps_level {
+       WMI_PS_LEVEL_DEEP_SLEEP         = 0x00,
+       WMI_PS_LEVEL_SHALLOW_SLEEP      = 0x01,
+       /* awake = all PS mechanisms are disabled */
+       WMI_PS_LEVEL_AWAKE              = 0x02,
+};
+
+enum wmi_ps_deep_sleep_clk_level {
+       /* 33k */
+       WMI_PS_DEEP_SLEEP_CLK_LEVEL_RTC         = 0x00,
+       /* 10k */
+       WMI_PS_DEEP_SLEEP_CLK_LEVEL_OSC         = 0x01,
+       /* @RTC Low latency */
+       WMI_PS_DEEP_SLEEP_CLK_LEVEL_RTC_LT      = 0x02,
+       WMI_PS_DEEP_SLEEP_CLK_LEVEL_XTAL        = 0x03,
+       WMI_PS_DEEP_SLEEP_CLK_LEVEL_SYSCLK      = 0x04,
+       /* Not Applicable */
+       WMI_PS_DEEP_SLEEP_CLK_LEVEL_N_A         = 0xFF,
+};
+
+/* Response by the FW to a D3 entry request */
+enum wmi_ps_d3_resp_policy {
+       WMI_PS_D3_RESP_POLICY_DEFAULT   = 0x00,
+       /* debug -D3 req is always denied */
+       WMI_PS_D3_RESP_POLICY_DENIED    = 0x01,
+       /* debug -D3 req is always approved */
+       WMI_PS_D3_RESP_POLICY_APPROVED  = 0x02,
+};
+
+/* Device common power save configurations */
+struct wmi_ps_dev_cfg {
+       /* lowest level of PS allowed while unassociated, enum wmi_ps_level_e
+        */
+       u8 ps_unassoc_min_level;
+       /* lowest deep sleep clock level while nonassoc, enum
+        * wmi_ps_deep_sleep_clk_level_e
+        */
+       u8 ps_unassoc_deep_sleep_min_level;
+       /* lowest level of PS allowed while associated, enum wmi_ps_level_e */
+       u8 ps_assoc_min_level;
+       /* lowest deep sleep clock level while assoc, enum
+        * wmi_ps_deep_sleep_clk_level_e
+        */
+       u8 ps_assoc_deep_sleep_min_level;
+       /* enum wmi_ps_deep_sleep_clk_level_e */
+       u8 ps_assoc_low_latency_ds_min_level;
+       /* enum wmi_ps_d3_resp_policy_e */
+       u8 ps_D3_response_policy;
+       /* BOOL */
+       u8 ps_D3_pm_pme_enabled;
+       /* BOOL */
+       u8 ps_halp_enable;
+       u8 ps_deep_sleep_enter_thresh_msec;
+       /* BOOL */
+       u8 ps_voltage_scaling_en;
+} __packed;
+
+/* WMI_PS_DEV_CFG_CMDID
+ *
+ * Configure common Power Save parameters of the device and all MIDs.
+ *
+ * Returned event:
+ * - WMI_PS_DEV_CFG_EVENTID
+ */
+struct wmi_ps_dev_cfg_cmd {
+       /* Device Power Save configuration to be applied */
+       struct wmi_ps_dev_cfg ps_dev_cfg;
+       /* alignment to 32b */
+       u8 reserved[2];
+} __packed;
+
+/* WMI_PS_DEV_CFG_EVENTID */
+struct wmi_ps_dev_cfg_event {
+       /* wmi_ps_cfg_cmd_status_e */
+       __le32 status;
+} __packed;
+
+/* WMI_PS_DEV_CFG_READ_CMDID
+ *
+ * request to retrieve device Power Save configuration
+ * (WMI_PS_DEV_CFG_CMD params)
+ *
+ * Returned event:
+ * - WMI_PS_DEV_CFG_READ_EVENTID
+ */
+struct wmi_ps_dev_cfg_read_cmd {
+       __le32 reserved;
+} __packed;
+
+/* WMI_PS_DEV_CFG_READ_EVENTID */
+struct wmi_ps_dev_cfg_read_event {
+       /* wmi_ps_cfg_cmd_status_e */
+       __le32 status;
+       /* Retrieved device Power Save configuration (WMI_PS_DEV_CFG_CMD
+        * params)
+        */
+       struct wmi_ps_dev_cfg dev_ps_cfg;
+       /* alignment to 32b */
+       u8 reserved[2];
+} __packed;
+
+/* Per Mac Power Save configurations */
+struct wmi_ps_mid_cfg {
+       /* Low power RX in BTI is enabled, BOOL */
+       u8 beacon_lprx_enable;
+       /* Sync to sector ID enabled, BOOL */
+       u8 beacon_sync_to_sectorId_enable;
+       /* Low power RX in DTI is enabled, BOOL */
+       u8 frame_exchange_lprx_enable;
+       /* Sleep Cycle while in scheduled PS, 1-31 */
+       u8 scheduled_sleep_cycle_pow2;
+       /* Stay Awake for k BIs every (sleep_cycle - k) BIs, 1-31 */
+       u8 scheduled_num_of_awake_bis;
+       u8 am_to_traffic_load_thresh_mbp;
+       u8 traffic_to_am_load_thresh_mbps;
+       u8 traffic_to_am_num_of_no_traffic_bis;
+       /* BOOL */
+       u8 continuous_traffic_psm;
+       __le16 no_traffic_to_min_usec;
+       __le16 no_traffic_to_max_usec;
+       __le16 snoozing_sleep_interval_milisec;
+       u8 max_no_data_awake_events;
+       /* Trigger WEB after k failed beacons */
+       u8 num_of_failed_beacons_rx_to_trigger_web;
+       /* Trigger BF after k failed beacons */
+       u8 num_of_failed_beacons_rx_to_trigger_bf;
+       /* Trigger SOB after k successful beacons */
+       u8 num_of_successful_beacons_rx_to_trigger_sob;
+} __packed;
+
+/* WMI_PS_MID_CFG_CMDID
+ *
+ * Configure Power Save parameters of a specific MID.
+ * These parameters are relevant for the specific BSS this MID belongs to.
+ *
+ * Returned event:
+ * - WMI_PS_MID_CFG_EVENTID
+ */
+struct wmi_ps_mid_cfg_cmd {
+       /* MAC ID */
+       u8 mid;
+       /* mid PS configuration to be applied */
+       struct wmi_ps_mid_cfg ps_mid_cfg;
+} __packed;
+
+/* WMI_PS_MID_CFG_EVENTID */
+struct wmi_ps_mid_cfg_event {
+       /* MAC ID */
+       u8 mid;
+       /* alignment to 32b */
+       u8 reserved[3];
+       /* wmi_ps_cfg_cmd_status_e */
+       __le32 status;
+} __packed;
+
+/* WMI_PS_MID_CFG_READ_CMDID
+ *
+ * request to retrieve Power Save configuration of mid
+ * (WMI_PS_MID_CFG_CMD params)
+ *
+ * Returned event:
+ * - WMI_PS_MID_CFG_READ_EVENTID
+ */
+struct wmi_ps_mid_cfg_read_cmd {
+       /* MAC ID */
+       u8 mid;
+       /* alignment to 32b */
+       u8 reserved[3];
+} __packed;
+
+/* WMI_PS_MID_CFG_READ_EVENTID */
+struct wmi_ps_mid_cfg_read_event {
+       /* MAC ID */
+       u8 mid;
+       /* Retrieved MID Power Save configuration (WMI_PS_MID_CFG_CMD params) */
+       struct wmi_ps_mid_cfg mid_ps_cfg;
+       /* wmi_ps_cfg_cmd_status_e */
+       __le32 status;
+} __packed;
+
+#define WMI_AOA_MAX_DATA_SIZE  (128)
+
+enum wmi_aoa_meas_status {
+       WMI_AOA_MEAS_SUCCESS            = 0x00,
+       WMI_AOA_MEAS_PEER_INCAPABLE     = 0x01,
+       WMI_AOA_MEAS_FAILURE            = 0x02,
+};
+
+/* WMI_AOA_MEAS_EVENTID */
+struct wmi_aoa_meas_event {
+       u8 mac_addr[WMI_MAC_LEN];
+       /* channels IDs:
+        * 0 - 58320 MHz
+        * 1 - 60480 MHz
+        * 2 - 62640 MHz
+        */
+       u8 channel;
+       /* enum wmi_aoa_meas_type */
+       u8 aoa_meas_type;
+       /* Measurements are from RFs, defined by the mask */
+       __le32 meas_rf_mask;
+       /* enum wmi_aoa_meas_status */
+       u8 meas_status;
+       u8 reserved;
+       /* Length of meas_data in bytes */
+       __le16 length;
+       u8 meas_data[WMI_AOA_MAX_DATA_SIZE];
+} __packed;
+
+/* WMI_TOF_GET_CAPABILITIES_EVENTID */
+struct wmi_tof_get_capabilities_event {
+       u8 ftm_capability;
+       /* maximum supported number of destinations to start TOF */
+       u8 max_num_of_dest;
+       /* maximum supported number of measurements per burst */
+       u8 max_num_of_meas_per_burst;
+       u8 reserved;
+       /* maximum supported multi bursts */
+       __le16 max_multi_bursts_sessions;
+       /* maximum supported FTM burst duration, wmi_tof_burst_duration_e */
+       __le16 max_ftm_burst_duration;
+       /* AOA supported types */
+       __le32 aoa_supported_types;
+} __packed;
+
+enum wmi_tof_session_end_status {
+       WMI_TOF_SESSION_END_NO_ERROR            = 0x00,
+       WMI_TOF_SESSION_END_FAIL                = 0x01,
+       WMI_TOF_SESSION_END_PARAMS_ERROR        = 0x02,
+       WMI_TOF_SESSION_END_ABORTED             = 0x03,
+};
+
+/* WMI_TOF_SESSION_END_EVENTID */
+struct wmi_tof_session_end_event {
+       /* FTM session ID */
+       __le32 session_id;
+       /* wmi_tof_session_end_status_e */
+       u8 status;
+       u8 reserved[3];
+} __packed;
+
+/* Responder FTM Results */
+struct wmi_responder_ftm_res {
+       u8 t1[6];
+       u8 t2[6];
+       u8 t3[6];
+       u8 t4[6];
+       __le16 tod_err;
+       __le16 toa_err;
+       __le16 tod_err_initiator;
+       __le16 toa_err_initiator;
+} __packed;
+
+enum wmi_tof_ftm_per_dest_res_status {
+       WMI_PER_DEST_RES_NO_ERROR               = 0x00,
+       WMI_PER_DEST_RES_TX_RX_FAIL             = 0x01,
+       WMI_PER_DEST_RES_PARAM_DONT_MATCH       = 0x02,
+};
+
+enum wmi_tof_ftm_per_dest_res_flags {
+       WMI_PER_DEST_RES_REQ_START              = 0x01,
+       WMI_PER_DEST_RES_BURST_REPORT_END       = 0x02,
+       WMI_PER_DEST_RES_REQ_END                = 0x04,
+       WMI_PER_DEST_RES_PARAM_UPDATE           = 0x08,
+};
+
+/* WMI_TOF_FTM_PER_DEST_RES_EVENTID */
+struct wmi_tof_ftm_per_dest_res_event {
+       /* FTM session ID */
+       __le32 session_id;
+       /* destination MAC address */
+       u8 dst_mac[WMI_MAC_LEN];
+       /* wmi_tof_ftm_per_dest_res_flags_e */
+       u8 flags;
+       /* wmi_tof_ftm_per_dest_res_status_e */
+       u8 status;
+       /* responder ASAP */
+       u8 responder_asap;
+       /* responder number of FTM per burst */
+       u8 responder_num_ftm_per_burst;
+       /* responder number of FTM burst exponent */
+       u8 responder_num_ftm_bursts_exp;
+       /* responder burst duration, wmi_tof_burst_duration_e */
+       u8 responder_burst_duration;
+       /* responder burst period, indicates the interval between two
+        * consecutive burst instances, in units of 100 ms
+        */
+       __le16 responder_burst_period;
+       /* receive burst counter */
+       __le16 bursts_cnt;
+       /* tsf of responder start burst */
+       __le32 tsf_sync;
+       /* actual received ftm per burst */
+       u8 actual_ftm_per_burst;
+       u8 reserved0[7];
+       struct wmi_responder_ftm_res responder_ftm_res[0];
+} __packed;
+
+enum wmi_tof_channel_info_type {
+       WMI_TOF_CHANNEL_INFO_AOA                = 0x00,
+       WMI_TOF_CHANNEL_INFO_LCI                = 0x01,
+       WMI_TOF_CHANNEL_INFO_LCR                = 0x02,
+       WMI_TOF_CHANNEL_INFO_VENDOR_SPECIFIC    = 0x03,
+       WMI_TOF_CHANNEL_INFO_CIR                = 0x04,
+       WMI_TOF_CHANNEL_INFO_RSSI               = 0x05,
+       WMI_TOF_CHANNEL_INFO_SNR                = 0x06,
+       WMI_TOF_CHANNEL_INFO_DEBUG              = 0x07,
+};
+
+/* WMI_TOF_CHANNEL_INFO_EVENTID */
+struct wmi_tof_channel_info_event {
+       /* FTM session ID */
+       __le32 session_id;
+       /* destination MAC address */
+       u8 dst_mac[WMI_MAC_LEN];
+       /* wmi_tof_channel_info_type_e */
+       u8 type;
+       /* data report length */
+       u8 len;
+       /* data report payload */
+       u8 report[0];
+} __packed;
+
 #endif /* __WILOCITY_WMI_H__ */
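
Tying the header changes together: wmi_fw_ver_event now carries fw_capabilities_len __le32 capability words, while the driver side (the fw_capabilities bitmap added to struct wil6210_priv above) holds WMI_FW_CAPABILITY_MAX bits. A sketch of how the words could be folded into the bitmap; the function name and loop shape are assumptions, and the real parsing lives in the driver's firmware handling code, which is not part of these hunks:

	static void wil_parse_fw_capabilities(struct wil6210_priv *wil,
					      struct wmi_fw_ver_event *evt)
	{
		u32 i, bit;

		bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
		for (i = 0; i < evt->fw_capabilities_len; i++) {
			u32 word = le32_to_cpu(evt->fw_capabilities[i]);

			for (bit = 0; bit < 32; bit++) {
				if ((word & BIT(bit)) &&
				    i * 32 + bit < WMI_FW_CAPABILITY_MAX)
					set_bit(i * 32 + bit,
						wil->fw_capabilities);
			}
		}
	}
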
index f549c25..03404cb 100644 (file)
@@ -1101,6 +1101,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
+       BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
index 2628d5e..748eaa6 100644 (file)
@@ -3884,11 +3884,11 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
        if (!check_vif_up(ifp->vif))
                return -EIO;
 
-       brcmf_dbg(CONN, "del_pmksa - PMK bssid = %pM\n", &pmksa->bssid);
+       brcmf_dbg(CONN, "del_pmksa - PMK bssid = %pM\n", pmksa->bssid);
 
        npmk = le32_to_cpu(cfg->pmk_list.npmk);
        for (i = 0; i < npmk; i++)
-               if (!memcmp(&pmksa->bssid, &pmk[i].bssid, ETH_ALEN))
+               if (!memcmp(pmksa->bssid, pmk[i].bssid, ETH_ALEN))
                        break;
 
        if ((npmk > 0) && (i < npmk)) {
@@ -4527,7 +4527,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
                                (u8 *)&settings->beacon.head[ie_offset],
                                settings->beacon.head_len - ie_offset,
                                WLAN_EID_SSID);
-               if (!ssid_ie)
+               if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN)
                        return -EINVAL;
 
                memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
@@ -5635,7 +5635,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
                  ifevent->action, ifevent->flags, ifevent->ifidx,
                  ifevent->bsscfgidx);
 
-       mutex_lock(&event->vif_event_lock);
+       spin_lock(&event->vif_event_lock);
        event->action = ifevent->action;
        vif = event->vif;
 
@@ -5643,7 +5643,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
        case BRCMF_E_IF_ADD:
                /* waiting process may have timed out */
                if (!cfg->vif_event.vif) {
-                       mutex_unlock(&event->vif_event_lock);
+                       spin_unlock(&event->vif_event_lock);
                        return -EBADF;
                }
 
@@ -5654,24 +5654,24 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
                        ifp->ndev->ieee80211_ptr = &vif->wdev;
                        SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
                }
-               mutex_unlock(&event->vif_event_lock);
+               spin_unlock(&event->vif_event_lock);
                wake_up(&event->vif_wq);
                return 0;
 
        case BRCMF_E_IF_DEL:
-               mutex_unlock(&event->vif_event_lock);
+               spin_unlock(&event->vif_event_lock);
                /* event may not be upon user request */
                if (brcmf_cfg80211_vif_event_armed(cfg))
                        wake_up(&event->vif_wq);
                return 0;
 
        case BRCMF_E_IF_CHANGE:
-               mutex_unlock(&event->vif_event_lock);
+               spin_unlock(&event->vif_event_lock);
                wake_up(&event->vif_wq);
                return 0;
 
        default:
-               mutex_unlock(&event->vif_event_lock);
+               spin_unlock(&event->vif_event_lock);
                break;
        }
        return -EINVAL;
@@ -5792,7 +5792,7 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
 static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
 {
        init_waitqueue_head(&event->vif_wq);
-       mutex_init(&event->vif_event_lock);
+       spin_lock_init(&event->vif_event_lock);
 }
 
 static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
@@ -6691,9 +6691,9 @@ static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event,
 {
        u8 evt_action;
 
-       mutex_lock(&event->vif_event_lock);
+       spin_lock(&event->vif_event_lock);
        evt_action = event->action;
-       mutex_unlock(&event->vif_event_lock);
+       spin_unlock(&event->vif_event_lock);
        return evt_action == action;
 }
 
@@ -6702,10 +6702,10 @@ void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
 {
        struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
 
-       mutex_lock(&event->vif_event_lock);
+       spin_lock(&event->vif_event_lock);
        event->vif = vif;
        event->action = 0;
-       mutex_unlock(&event->vif_event_lock);
+       spin_unlock(&event->vif_event_lock);
 }
 
 bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
@@ -6713,9 +6713,9 @@ bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
        struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
        bool armed;
 
-       mutex_lock(&event->vif_event_lock);
+       spin_lock(&event->vif_event_lock);
        armed = event->vif != NULL;
-       mutex_unlock(&event->vif_event_lock);
+       spin_unlock(&event->vif_event_lock);
 
        return armed;
 }
index 7d77f86..8889832 100644 (file)
@@ -227,7 +227,7 @@ struct escan_info {
  */
 struct brcmf_cfg80211_vif_event {
        wait_queue_head_t vif_wq;
-       struct mutex vif_event_lock;
+       spinlock_t vif_event_lock;
        u8 action;
        struct brcmf_cfg80211_vif *vif;
 };
index 8d16f02..65e8c87 100644 (file)
@@ -743,7 +743,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
                 * serious troublesome side effects. The p2p module will clean
                 * up the ifp if needed.
                 */
-               brcmf_p2p_ifp_removed(ifp);
+               brcmf_p2p_ifp_removed(ifp, rtnl_locked);
                kfree(ifp);
        }
 }
index 66f942f..de19c7c 100644 (file)
@@ -2297,7 +2297,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
        return err;
 }
 
-void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
+void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked)
 {
        struct brcmf_cfg80211_info *cfg;
        struct brcmf_cfg80211_vif *vif;
@@ -2306,9 +2306,11 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
        vif = ifp->vif;
        cfg = wdev_to_cfg(&vif->wdev);
        cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
-       rtnl_lock();
+       if (!rtnl_locked)
+               rtnl_lock();
        cfg80211_unregister_wdev(&vif->wdev);
-       rtnl_unlock();
+       if (!rtnl_locked)
+               rtnl_unlock();
        brcmf_free_vif(vif);
 }
 
index a3bd18c..8ce9447 100644 (file)
@@ -155,7 +155,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
 int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
 int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
                       enum brcmf_fil_p2p_if_types if_type);
-void brcmf_p2p_ifp_removed(struct brcmf_if *ifp);
+void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked);
 int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev);
 void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev);
 int brcmf_p2p_scan_prep(struct wiphy *wiphy,
index 68ab3ac..b892dac 100644 (file)
@@ -313,6 +313,7 @@ struct rte_console {
 
 #define KSO_WAIT_US 50
 #define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
+#define BRCMF_SDIO_MAX_ACCESS_ERRORS   5
 
 /*
  * Conversion of 802.1D priority to precedence level
@@ -677,6 +678,7 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
 {
        u8 wr_val = 0, rd_val, cmp_val, bmask;
        int err = 0;
+       int err_cnt = 0;
        int try_cnt = 0;
 
        brcmf_dbg(TRACE, "Enter: on=%d\n", on);
@@ -712,9 +714,14 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
                 */
                rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
                                           &err);
-               if (((rd_val & bmask) == cmp_val) && !err)
+               if (!err) {
+                       if ((rd_val & bmask) == cmp_val)
+                               break;
+                       err_cnt = 0;
+               }
+               /* bail out upon subsequent access errors */
+               if (err && (err_cnt++ > BRCMF_SDIO_MAX_ACCESS_ERRORS))
                        break;
-
                udelay(KSO_WAIT_US);
                brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
                                  wr_val, &err);
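
The reworked KSO poll above only gives up after a run of consecutive access errors: err_cnt is reset whenever a read succeeds, so an isolated failure no longer aborts the wait. The same pattern, distilled into a sketch (read_back and target_reached are placeholders for the SDIO register read and value check in the real loop):

	u8 val;
	int err, err_cnt = 0, try_cnt = 0;

	do {
		err = read_back(&val);           /* placeholder for the SDIO read */
		if (!err) {
			if (target_reached(val)) /* placeholder for the value check */
				break;
			err_cnt = 0;             /* a successful access resets the counter */
		} else if (err_cnt++ > BRCMF_SDIO_MAX_ACCESS_ERRORS) {
			break;                   /* only a run of consecutive errors aborts */
		}
		udelay(KSO_WAIT_US);
	} while (try_cnt++ < MAX_KSO_ATTEMPTS);
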
@@ -3757,7 +3764,8 @@ static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
        u32 val, rev;
 
        val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
-       if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+       if ((sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 ||
+            sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4339) &&
            addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
                rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
                if (rev >= 2) {
index a10f35c..fe67559 100644 (file)
@@ -19,6 +19,7 @@
 #ifndef __CHECKER__
 #define CREATE_TRACE_POINTS
 #include "tracepoint.h"
+#include "debug.h"
 
 void __brcmf_err(const char *func, const char *fmt, ...)
 {
index fa26619..2f978a3 100644 (file)
@@ -1458,11 +1458,15 @@ static int brcmf_usb_reset_resume(struct usb_interface *intf)
 #define BRCMF_USB_DEVICE(dev_id)       \
        { USB_DEVICE(BRCM_USB_VENDOR_ID_BROADCOM, dev_id) }
 
+#define LINKSYS_USB_DEVICE(dev_id)     \
+       { USB_DEVICE(BRCM_USB_VENDOR_ID_LINKSYS, dev_id) }
+
 static struct usb_device_id brcmf_usb_devid_table[] = {
        BRCMF_USB_DEVICE(BRCM_USB_43143_DEVICE_ID),
        BRCMF_USB_DEVICE(BRCM_USB_43236_DEVICE_ID),
        BRCMF_USB_DEVICE(BRCM_USB_43242_DEVICE_ID),
        BRCMF_USB_DEVICE(BRCM_USB_43569_DEVICE_ID),
+       LINKSYS_USB_DEVICE(BRCM_USB_43235_LINKSYS_DEVICE_ID),
        { USB_DEVICE(BRCM_USB_VENDOR_ID_LG, BRCM_USB_43242_LG_DEVICE_ID) },
        /* special entry for device with firmware loaded and running */
        BRCMF_USB_DEVICE(BRCM_USB_BCMFW_DEVICE_ID),
index 3cc42be..d0407d9 100644 (file)
@@ -22,6 +22,7 @@
 
 #define BRCM_USB_VENDOR_ID_BROADCOM    0x0a5c
 #define BRCM_USB_VENDOR_ID_LG          0x043e
+#define BRCM_USB_VENDOR_ID_LINKSYS     0x13b1
 #define BRCM_PCIE_VENDOR_ID_BROADCOM   PCI_VENDOR_ID_BROADCOM
 
 /* Chipcommon Core Chip IDs */
@@ -58,6 +59,7 @@
 
 /* USB Device IDs */
 #define BRCM_USB_43143_DEVICE_ID       0xbd1e
+#define BRCM_USB_43235_LINKSYS_DEVICE_ID       0x0039
 #define BRCM_USB_43236_DEVICE_ID       0xbd17
 #define BRCM_USB_43242_DEVICE_ID       0xbd1f
 #define BRCM_USB_43242_LG_DEVICE_ID    0x3101
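
LINKSYS_USB_DEVICE() mirrors BRCMF_USB_DEVICE() with the Linksys vendor ID, so the new table entry expands to roughly the following (IDs taken from the brcm_hw_ids.h hunk above):

    /* LINKSYS_USB_DEVICE(BRCM_USB_43235_LINKSYS_DEVICE_ID) becomes: */
    { USB_DEVICE(0x13b1 /* BRCM_USB_VENDOR_ID_LINKSYS */,
                 0x0039 /* BRCM_USB_43235_LINKSYS_DEVICE_ID */) },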
index 209dc99..4db327a 100644 (file)
@@ -2671,7 +2671,7 @@ const struct il_ops il3945_ops = {
        .send_led_cmd = il3945_send_led_cmd,
 };
 
-static struct il_cfg il3945_bg_cfg = {
+static const struct il_cfg il3945_bg_cfg = {
        .name = "3945BG",
        .fw_name_pre = IL3945_FW_PRE,
        .ucode_api_max = IL3945_UCODE_API_MAX,
@@ -2700,7 +2700,7 @@ static struct il_cfg il3945_bg_cfg = {
        },
 };
 
-static struct il_cfg il3945_abg_cfg = {
+static const struct il_cfg il3945_abg_cfg = {
        .name = "3945ABG",
        .fw_name_pre = IL3945_FW_PRE,
        .ucode_api_max = IL3945_UCODE_API_MAX,
index 726ede3..3bba521 100644 (file)
@@ -1320,7 +1320,7 @@ struct il_priv {
        u64 timestamp;
 
        union {
-#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+#if IS_ENABLED(CONFIG_IWL3945)
                struct {
                        void *shared_virt;
                        dma_addr_t shared_phys;
@@ -1351,7 +1351,7 @@ struct il_priv {
 
                } _3945;
 #endif
-#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
+#if IS_ENABLED(CONFIG_IWL4965)
                struct {
                        struct il_rx_phy_res last_phy_res;
                        bool last_phy_res_valid;
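
IS_ENABLED(CONFIG_FOO) is true when the option is built-in or modular, which is exactly what the replaced defined() pair expressed, and it can also be used in ordinary C code; for example (the helper call is illustrative):

    /* before */
    #if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
    /* after - same condition, shorter and harder to get wrong */
    #if IS_ENABLED(CONFIG_IWL4965)

    /* also usable at C level, letting the compiler discard dead code */
    if (IS_ENABLED(CONFIG_IWL4965))
            init_4965_state();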
index fbaf705..1fec6af 100644 (file)
 #define IWL9000_SMEM_OFFSET            0x400000
 #define IWL9000_SMEM_LEN               0x68000
 
-#define  IWL9000_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
+#define  IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
 #define  IWL9260_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
-#define  IWL9260LC_FW_PRE "iwlwifi-9260-th-a0-lc-a0-"
+#define  IWL9000LC_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
 #define IWL9000_MODULE_FIRMWARE(api) \
        IWL9000_FW_PRE "-" __stringify(api) ".ucode"
 #define IWL9260_MODULE_FIRMWARE(api) \
        IWL9260_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL9260LC_MODULE_FIRMWARE(api) \
-       IWL9260LC_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9000LC_MODULE_FIRMWARE(api) \
+       IWL9000LC_FW_PRE "-" __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_9000         10
 
@@ -146,41 +146,62 @@ static const struct iwl_tt_params iwl9000_tt_params = {
        .mac_addr_from_csr = true,                                      \
        .rf_id = true
 
+const struct iwl_cfg iwl9160_2ac_cfg = {
+       .name = "Intel(R) Dual Band Wireless AC 9160",
+       .fw_name_pre = IWL9260_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
 const struct iwl_cfg iwl9260_2ac_cfg = {
-               .name = "Intel(R) Dual Band Wireless AC 9260",
-               .fw_name_pre = IWL9260_FW_PRE,
-               IWL_DEVICE_9000,
-               .ht_params = &iwl9000_ht_params,
-               .nvm_ver = IWL9000_NVM_VERSION,
-               .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
-               .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+       .name = "Intel(R) Dual Band Wireless AC 9260",
+       .fw_name_pre = IWL9260_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9270_2ac_cfg = {
+       .name = "Intel(R) Dual Band Wireless AC 9270",
+       .fw_name_pre = IWL9260_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9460_2ac_cfg = {
+       .name = "Intel(R) Dual Band Wireless AC 9460",
+       .fw_name_pre = IWL9000_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+       .integrated = true,
 };
 
 /*
 * TODO: the struct below is for internal testing only; it should be
  * removed by EO 2016~
  */
-const struct iwl_cfg iwl9260lc_2ac_cfg = {
-               .name = "Intel(R) Dual Band Wireless AC 9260",
-               .fw_name_pre = IWL9260LC_FW_PRE,
-               IWL_DEVICE_9000,
-               .ht_params = &iwl9000_ht_params,
-               .nvm_ver = IWL9000_NVM_VERSION,
-               .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
-               .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwl5165_2ac_cfg = {
-               .name = "Intel(R) Dual Band Wireless AC 5165",
-               .fw_name_pre = IWL9000_FW_PRE,
-               IWL_DEVICE_9000,
-               .ht_params = &iwl9000_ht_params,
-               .nvm_ver = IWL9000_NVM_VERSION,
-               .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
-               .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-               .integrated = true,
+const struct iwl_cfg iwl9000lc_2ac_cfg = {
+       .name = "Intel(R) Dual Band Wireless AC 9000",
+       .fw_name_pre = IWL9000LC_FW_PRE,
+       IWL_DEVICE_9000,
+       .ht_params = &iwl9000_ht_params,
+       .nvm_ver = IWL9000_NVM_VERSION,
+       .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+       .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+       .integrated = true,
 };
 
 MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9260LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9000LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
index 423b233..7008319 100644 (file)
@@ -449,9 +449,11 @@ extern const struct iwl_cfg iwl4165_2ac_cfg;
 extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
 extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
 extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
+extern const struct iwl_cfg iwl9000lc_2ac_cfg;
+extern const struct iwl_cfg iwl9160_2ac_cfg;
 extern const struct iwl_cfg iwl9260_2ac_cfg;
-extern const struct iwl_cfg iwl9260lc_2ac_cfg;
-extern const struct iwl_cfg iwl5165_2ac_cfg;
+extern const struct iwl_cfg iwl9270_2ac_cfg;
+extern const struct iwl_cfg iwl9460_2ac_cfg;
 extern const struct iwl_cfg iwla000_2ac_cfg;
 #endif /* CONFIG_IWLMVM */
 
index 1d6f5d2..dd75ea7 100644 (file)
@@ -77,7 +77,6 @@
  */
 #define FH_MEM_LOWER_BOUND                   (0x1000)
 #define FH_MEM_UPPER_BOUND                   (0x2000)
-#define TFH_MEM_LOWER_BOUND                  (0xA06000)
 
 /**
  * Keep-Warm (KW) buffer base address.
 #define FH_MEM_CBBC_20_31_LOWER_BOUND          (FH_MEM_LOWER_BOUND + 0xB20)
 #define FH_MEM_CBBC_20_31_UPPER_BOUND          (FH_MEM_LOWER_BOUND + 0xB80)
 /* a000 TFD table address, 64 bit */
-#define TFH_TFDQ_CBB_TABLE                     (TFH_MEM_LOWER_BOUND + 0x1C00)
+#define TFH_TFDQ_CBB_TABLE                     (0x1C00)
 
 /* Find TFD CB base pointer for given queue */
 static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
@@ -156,7 +155,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
  * In case of DRAM read address which is not aligned to 128B, the TFH will
  * enable transfer size which doesn't cross 64B DRAM address boundary.
 */
-#define TFH_TRANSFER_MODE              (TFH_MEM_LOWER_BOUND + 0x1F40)
+#define TFH_TRANSFER_MODE              (0x1F40)
 #define TFH_TRANSFER_MAX_PENDING_REQ   0xc
 #define TFH_CHUNK_SIZE_128                     BIT(8)
 #define TFH_CHUNK_SPLIT_MODE           BIT(10)
@@ -167,7 +166,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
  * the start of the TFD first TB.
  * In case of a DRAM Tx CMD update the TFH will update PN and Key ID
  */
-#define TFH_TXCMD_UPDATE_CFG           (TFH_MEM_LOWER_BOUND + 0x1F48)
+#define TFH_TXCMD_UPDATE_CFG           (0x1F48)
 /*
  * Controls TX DMA operation
  *
@@ -181,22 +180,22 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
  * set to 1 - interrupt is sent to the driver
  * Bit 0: Indicates the snoop configuration
 */
-#define TFH_SRV_DMA_CHNL0_CTRL (TFH_MEM_LOWER_BOUND + 0x1F60)
+#define TFH_SRV_DMA_CHNL0_CTRL (0x1F60)
 #define TFH_SRV_DMA_SNOOP      BIT(0)
 #define TFH_SRV_DMA_TO_DRIVER  BIT(24)
 #define TFH_SRV_DMA_START      BIT(31)
 
 /* Defines the DMA SRAM write start address to transfer a data block */
-#define TFH_SRV_DMA_CHNL0_SRAM_ADDR    (TFH_MEM_LOWER_BOUND + 0x1F64)
+#define TFH_SRV_DMA_CHNL0_SRAM_ADDR    (0x1F64)
 
 /* Defines the 64bits DRAM start address to read the DMA data block from */
-#define TFH_SRV_DMA_CHNL0_DRAM_ADDR    (TFH_MEM_LOWER_BOUND + 0x1F68)
+#define TFH_SRV_DMA_CHNL0_DRAM_ADDR    (0x1F68)
 
 /*
  * Defines the number of bytes to transfer from DRAM to SRAM.
  * Note that this register may be configured with non-dword aligned size.
  */
-#define TFH_SRV_DMA_CHNL0_BC   (TFH_MEM_LOWER_BOUND + 0x1F70)
+#define TFH_SRV_DMA_CHNL0_BC   (0x1F70)
 
 /**
  * Rx SRAM Control and Status Registers (RSCSR)
index 459bf73..406ef30 100644 (file)
 #define OSC_CLK_FORCE_CONTROL          (0x8)
 
 #define FH_UCODE_LOAD_STATUS           (0x1AF0)
-#define CSR_UCODE_LOAD_STATUS_ADDR     (0x1E70)
-enum secure_load_status_reg {
-       LMPM_CPU_UCODE_LOADING_STARTED                  = 0x00000001,
-       LMPM_CPU_HDRS_LOADING_COMPLETED                 = 0x00000003,
-       LMPM_CPU_UCODE_LOADING_COMPLETED                = 0x00000007,
-       LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED           = 0x000000F8,
-       LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK        = 0x0000FF00,
-};
 
-#define LMPM_SECURE_INSPECTOR_CODE_ADDR        (0x1E38)
-#define LMPM_SECURE_INSPECTOR_DATA_ADDR        (0x1E3C)
+/*
+ * Replacing FH_UCODE_LOAD_STATUS
+ * This register is written by the driver and is read by uCode during the boot flow.
+ * Note that this address is cleared after MAC reset.
+ */
+#define UREG_UCODE_LOAD_STATUS         (0xa05c40)
+
 #define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR   (0x1E78)
 #define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR   (0x1E7C)
 
-#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE   (0x400000)
-#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE   (0x402000)
 #define LMPM_SECURE_CPU1_HDR_MEM_SPACE         (0x420000)
 #define LMPM_SECURE_CPU2_HDR_MEM_SPACE         (0x420400)
 
index d1c4fb8..6c8e3ca 100644 (file)
@@ -432,26 +432,43 @@ struct iwl_mvm_rm_sta_cmd {
        u8 reserved[3];
 } __packed; /* REMOVE_STA_CMD_API_S_VER_2 */
 
+/**
+ * struct iwl_mvm_mgmt_mcast_key_cmd_v1
+ * ( MGMT_MCAST_KEY = 0x1f )
+ * @ctrl_flags: %iwl_sta_key_flag
+ * @igtk:
+ * @k1: unused
+ * @k2: unused
+ * @sta_id: station ID that supports IGTK
+ * @key_id:
+ * @receive_seq_cnt: initial RSC/PN needed for replay check
+ */
+struct iwl_mvm_mgmt_mcast_key_cmd_v1 {
+       __le32 ctrl_flags;
+       u8 igtk[16];
+       u8 k1[16];
+       u8 k2[16];
+       __le32 key_id;
+       __le32 sta_id;
+       __le64 receive_seq_cnt;
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+
 /**
  * struct iwl_mvm_mgmt_mcast_key_cmd
  * ( MGMT_MCAST_KEY = 0x1f )
  * @ctrl_flags: %iwl_sta_key_flag
- * @IGTK:
- * @K1: unused
- * @K2: unused
+ * @igtk: IGTK master key
  * @sta_id: station ID that supports IGTK
  * @key_id:
  * @receive_seq_cnt: initial RSC/PN needed for replay check
  */
 struct iwl_mvm_mgmt_mcast_key_cmd {
        __le32 ctrl_flags;
-       u8 IGTK[16];
-       u8 K1[16];
-       u8 K2[16];
+       u8 igtk[32];
        __le32 key_id;
        __le32 sta_id;
        __le64 receive_seq_cnt;
-} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_2 */
 
 struct iwl_mvm_wep_key {
        u8 key_index;
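
The v2 command drops the unused K1/K2 fields and widens the key material to 32 bytes so BIP-GMAC-256 keys fit. A rough sketch of filling it for a GMAC IGTK, mirroring the sta.c hunk further down (error handling omitted):

    struct iwl_mvm_mgmt_mcast_key_cmd cmd = {
            .key_id     = cpu_to_le32(keyconf->keyidx),
            .sta_id     = cpu_to_le32(sta_id),
            .ctrl_flags = cpu_to_le32(STA_KEY_FLG_GCMP),
    };

    memcpy(cmd.igtk, keyconf->key, keyconf->keylen);        /* up to 32 bytes */
    if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
            cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);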
index 4144623..6b4c63a 100644 (file)
@@ -675,13 +675,21 @@ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
                            tx_resp->frame_count) & 0xfff;
 }
 
+/* Available options for the SCD_QUEUE_CFG HCMD */
+enum iwl_scd_cfg_actions {
+       SCD_CFG_DISABLE_QUEUE           = 0x0,
+       SCD_CFG_ENABLE_QUEUE            = 0x1,
+       SCD_CFG_UPDATE_QUEUE_TID        = 0x2,
+};
+
 /**
  * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
  * @token:
  * @sta_id: station id
  * @tid:
  * @scd_queue: scheduler queue to config
- * @enable: 1 queue enable, 0 queue disable
+ * @action: 1 queue enable, 0 queue disable, 2 change txq's tid owner
+ *     Value is one of %iwl_scd_cfg_actions options
  * @aggregate: 1 aggregated queue, 0 otherwise
  * @tx_fifo: %enum iwl_mvm_tx_fifo
  * @window: BA window size
@@ -692,7 +700,7 @@ struct iwl_scd_txq_cfg_cmd {
        u8 sta_id;
        u8 tid;
        u8 scd_queue;
-       u8 enable;
+       u8 action;
        u8 aggregate;
        u8 tx_fifo;
        u8 window;
index 71076f0..57b574b 100644 (file)
@@ -482,13 +482,17 @@ struct iwl_nvm_access_cmd {
  * @block_size: the block size in powers of 2
  * @block_num: number of blocks specified in the command.
  * @device_phy_addr: virtual addresses from device side
+ *     32 bit address for API version 1, 64 bit address for API version 2.
 */
 struct iwl_fw_paging_cmd {
        __le32 flags;
        __le32 block_size;
        __le32 block_num;
-       __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
-} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
+       union {
+               __le32 addr32[NUM_OF_FW_PAGING_BLOCKS];
+               __le64 addr64[NUM_OF_FW_PAGING_BLOCKS];
+       } device_phy_addr;
+} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_2 */
 
 /*
  * Fw items ID's
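
Because the two union members differ in width, the command has to be sent with a length that matches the API actually used; a condensed sketch of the size and fill logic, as the fw.c hunk below also does (new_tx_api, addr[] and num_blocks are placeholders):

    size_t size = sizeof(struct iwl_fw_paging_cmd);
    int i;

    if (!new_tx_api)                                /* old 32-bit layout */
            size -= NUM_OF_FW_PAGING_BLOCKS * 4;

    for (i = 0; i < num_blocks; i++) {
            if (new_tx_api)
                    cmd.device_phy_addr.addr64[i] =
                            cpu_to_le64(addr[i] >> PAGE_2_EXP_SIZE);
            else
                    cmd.device_phy_addr.addr32[i] =
                            cpu_to_le32(addr[i] >> PAGE_2_EXP_SIZE);
    }
    /* then send 'size' bytes of 'cmd' */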
index 1abcabb..46b52bf 100644 (file)
@@ -960,5 +960,6 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
        }
 
        mvm->fw_dbg_conf = conf_id;
-       return ret;
+
+       return 0;
 }
index f7dff76..e9f1be9 100644 (file)
@@ -105,7 +105,8 @@ iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
 {
        u32 trig_vif = le32_to_cpu(trig->vif_type);
 
-       return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif;
+       return trig_vif == IWL_FW_DBG_CONF_VIF_ANY ||
+              ieee80211_vif_type_p2p(vif) == trig_vif;
 }
 
 static inline bool
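
ieee80211_vif_type_p2p() folds vif->p2p into the reported interface type, so a trigger configured for a P2P interface type now matches the right vifs; for instance:

    /* a station vif flagged as P2P is reported as a P2P client */
    vif->type = NL80211_IFTYPE_STATION;
    vif->p2p  = true;
    /* ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_CLIENT,
     * whereas vif->type alone would only ever match NL80211_IFTYPE_STATION
     */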
index 7e0cdbf..47e8e70 100644 (file)
@@ -385,9 +385,7 @@ static int iwl_save_fw_paging(struct iwl_mvm *mvm,
 /* send paging cmd to FW in case CPU2 has paging image */
 static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
 {
-       int blk_idx;
-       __le32 dev_phy_addr;
-       struct iwl_fw_paging_cmd fw_paging_cmd = {
+       struct iwl_fw_paging_cmd paging_cmd = {
                .flags =
                        cpu_to_le32(PAGING_CMD_IS_SECURED |
                                    PAGING_CMD_IS_ENABLED |
@@ -396,18 +394,32 @@ static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
                .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
                .block_num = cpu_to_le32(mvm->num_of_paging_blk),
        };
+       int blk_idx, size = sizeof(paging_cmd);
+
+       /* A bit hard coded - but this is the old API and will be deprecated */
+       if (!iwl_mvm_has_new_tx_api(mvm))
+               size -= NUM_OF_FW_PAGING_BLOCKS * 4;
 
        /* loop over all paging blocks + the CSS block */
        for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
-               dev_phy_addr =
-                       cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
-                                   PAGE_2_EXP_SIZE);
-               fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
+               dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;
+
+               addr = addr >> PAGE_2_EXP_SIZE;
+
+               if (iwl_mvm_has_new_tx_api(mvm)) {
+                       __le64 phy_addr = cpu_to_le64(addr);
+
+                       paging_cmd.device_phy_addr.addr64[blk_idx] = phy_addr;
+               } else {
+                       __le32 phy_addr = cpu_to_le32(addr);
+
+                       paging_cmd.device_phy_addr.addr32[blk_idx] = phy_addr;
+               }
        }
 
        return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
                                                    IWL_ALWAYS_LONG_GROUP, 0),
-                                   0, sizeof(fw_paging_cmd), &fw_paging_cmd);
+                                   0, size, &paging_cmd);
 }
 
 /*
index 6d60645..9506e65 100644 (file)
@@ -465,7 +465,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
        hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
 
-       BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 4);
+       BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
        memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
        hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
        hw->wiphy->cipher_suites = mvm->ciphers;
@@ -490,6 +490,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                mvm->ciphers[hw->wiphy->n_cipher_suites] =
                        WLAN_CIPHER_SUITE_AES_CMAC;
                hw->wiphy->n_cipher_suites++;
+               if (iwl_mvm_has_new_rx_api(mvm)) {
+                       mvm->ciphers[hw->wiphy->n_cipher_suites] =
+                               WLAN_CIPHER_SUITE_BIP_GMAC_128;
+                       hw->wiphy->n_cipher_suites++;
+                       mvm->ciphers[hw->wiphy->n_cipher_suites] =
+                               WLAN_CIPHER_SUITE_BIP_GMAC_256;
+                       hw->wiphy->n_cipher_suites++;
+               }
        }
 
        /* currently FW API supports only one optional cipher scheme */
@@ -624,6 +632,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
                               NL80211_FEATURE_LOW_PRIORITY_SCAN |
                               NL80211_FEATURE_P2P_GO_OPPPS |
+                              NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
                               NL80211_FEATURE_DYNAMIC_SMPS |
                               NL80211_FEATURE_STATIC_SMPS |
                               NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
@@ -2746,6 +2755,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
                key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+       case WLAN_CIPHER_SUITE_BIP_GMAC_256:
                WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
                break;
        case WLAN_CIPHER_SUITE_WEP40:
@@ -2779,9 +2790,11 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
                         * GTK on AP interface is a TX-only key, return 0;
                         * on IBSS they're per-station and because we're lazy
                         * we don't support them for RX, so do the same.
-                        * CMAC in AP/IBSS modes must be done in software.
+                        * CMAC/GMAC in AP/IBSS modes must be done in software.
                         */
-                       if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+                       if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+                           key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+                           key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
                                ret = -EOPNOTSUPP;
                        else
                                ret = 0;
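
The driver builds its advertised cipher list at runtime and only appends the BIP-GMAC suites when the new RX API is present; the underlying pattern is just a persistent array plus a running count, roughly:

    u32 *ciphers = mvm->ciphers;            /* persistent backing array */
    int n = 0;

    ciphers[n++] = WLAN_CIPHER_SUITE_AES_CMAC;
    if (iwl_mvm_has_new_rx_api(mvm)) {
            ciphers[n++] = WLAN_CIPHER_SUITE_BIP_GMAC_128;
            ciphers[n++] = WLAN_CIPHER_SUITE_BIP_GMAC_256;
    }
    hw->wiphy->cipher_suites = ciphers;
    hw->wiphy->n_cipher_suites = n;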
index b4fc86d..2e30990 100644 (file)
@@ -467,6 +467,8 @@ struct iwl_mvm_vif {
 static inline struct iwl_mvm_vif *
 iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
 {
+       if (!vif)
+               return NULL;
        return (void *)vif->drv_priv;
 }
 
@@ -697,6 +699,10 @@ struct iwl_mvm_baid_data {
  *     it. In this state, when a new queue is needed to be allocated but no
  *     such free queue exists, an inactive queue might be freed and given to
  *     the new RA/TID.
+ * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
+ *     This is the state of a queue that has had traffic pass through it, but
+ *     needs to be reconfigured for some reason, e.g. the queue needs to
+ *     become unshared and have aggregation re-enabled on it.
  */
 enum iwl_mvm_queue_status {
        IWL_MVM_QUEUE_FREE,
@@ -704,10 +710,11 @@ enum iwl_mvm_queue_status {
        IWL_MVM_QUEUE_READY,
        IWL_MVM_QUEUE_SHARED,
        IWL_MVM_QUEUE_INACTIVE,
+       IWL_MVM_QUEUE_RECONFIGURING,
 };
 
 #define IWL_MVM_DQA_QUEUE_TIMEOUT      (5 * HZ)
-#define IWL_MVM_NUM_CIPHERS             8
+#define IWL_MVM_NUM_CIPHERS             10
 
 struct iwl_mvm {
        /* for logger access */
@@ -767,6 +774,7 @@ struct iwl_mvm {
                u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
                bool reserved; /* Is this the TXQ reserved for a STA */
                u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
+               u8 txq_tid; /* The TID "owner" of this queue */
                u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
                /* Timestamp for inactivation per TID of this queue */
                unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
@@ -1122,6 +1130,18 @@ static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
                (mvm->trans->runtime_pm_mode != IWL_PLAT_PM_MODE_D0I3);
 }
 
+static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue)
+{
+       return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) &&
+              (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE);
+}
+
+static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue)
+{
+       return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) &&
+              (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE);
+}
+
 static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
 {
        bool nvm_lar = mvm->nvm_data->lar_enabled;
@@ -1192,6 +1212,12 @@ static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
                           IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
 }
 
+static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
+{
+       /* TODO - replace with TLV once defined */
+       return mvm->trans->cfg->use_tfh;
+}
+
 static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
 {
 #ifdef CONFIG_THERMAL
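
The two range helpers replace open-coded comparisons against the DQA queue bounds; the sta.c hunk further down uses them exactly like this:

    if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
        !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
        (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
            IWL_ERR(mvm, "No DATA queues available to share\n");
            return -ENOSPC;
    }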
index df6c32c..08d8a8a 100644 (file)
@@ -132,7 +132,8 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
                   IEEE80211_CCMP_PN_LEN) <= 0)
                return -1;
 
-       memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
+       if (!(stats->flag & RX_FLAG_AMSDU_MORE))
+               memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
        stats->flag |= RX_FLAG_PN_VALIDATED;
 
        return 0;
@@ -883,6 +884,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                        u8 *qc = ieee80211_get_qos_ctl(hdr);
 
                        *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+                       if (!(desc->amsdu_info &
+                             IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
+                               rx_status->flag |= RX_FLAG_AMSDU_MORE;
                }
                if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
                        iwl_mvm_agg_rx_received(mvm, baid);
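
Every subframe of an A-MSDU carries the same PN, so the replay check may only record the PN once the last subframe has been seen; earlier subframes are tagged RX_FLAG_AMSDU_MORE and skip the memcpy, which keeps them passing the strictly-greater comparison. Condensed from the two hunks above:

    /* drop anything whose PN did not move forward */
    if (memcmp(pn, stored_pn, IEEE80211_CCMP_PN_LEN) <= 0)
            return -1;

    /* only the last A-MSDU subframe advances the stored PN, so the
     * remaining subframes (same PN) are still accepted above
     */
    if (!(stats->flag & RX_FLAG_AMSDU_MORE))
            memcpy(stored_pn, pn, IEEE80211_CCMP_PN_LEN);
    stats->flag |= RX_FLAG_PN_VALIDATED;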
index 3130b9c..30fc3af 100644 (file)
@@ -468,6 +468,11 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
                    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
                        continue;
 
+               /* Don't try and take queues being reconfigured */
+               if (mvm->queue_info[queue].status ==
+                   IWL_MVM_QUEUE_RECONFIGURING)
+                       continue;
+
                ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
        }
 
@@ -501,31 +506,37 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
                queue = ac_to_queue[IEEE80211_AC_VO];
 
        /* Make sure queue found (or not) is legal */
-       if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
-              queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) ||
-             (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
-              queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) ||
-             (queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) {
+       if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
+           !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
+           (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
                IWL_ERR(mvm, "No DATA queues available to share\n");
-               queue = -ENOSPC;
+               return -ENOSPC;
+       }
+
+       /* Make sure the queue isn't in the middle of being reconfigured */
+       if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
+               IWL_ERR(mvm,
+                       "TXQ %d is in the middle of re-config - try again\n",
+                       queue);
+               return -EBUSY;
        }
 
        return queue;
 }
 
 /*
- * If a given queue has a higher AC than the TID stream that is being added to
- * it, the queue needs to be redirected to the lower AC. This function does that
+ * If a given queue has a higher AC than the TID stream that is being compared
+ * to, the queue needs to be redirected to the lower AC. This function does that
  * in such a case, otherwise - if no redirection required - it does nothing,
  * unless the %force param is true.
  */
-static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
-                                     int ac, int ssn, unsigned int wdg_timeout,
-                                     bool force)
+int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+                              int ac, int ssn, unsigned int wdg_timeout,
+                              bool force)
 {
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
-               .enable = 0,
+               .action = SCD_CFG_DISABLE_QUEUE,
        };
        bool shared_queue;
        unsigned long mq;
@@ -551,11 +562,12 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
 
        cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
        cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
+       cmd.tid = mvm->queue_info[queue].txq_tid;
        mq = mvm->queue_info[queue].hw_queue_to_mac80211;
        shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
        spin_unlock_bh(&mvm->queue_info_lock);
 
-       IWL_DEBUG_TX_QUEUES(mvm, "Redirecting shared TXQ #%d to FIFO #%d\n",
+       IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
                            queue, iwl_mvm_ac_to_tx_fifo[ac]);
 
        /* Stop MAC queues and wait for this queue to empty */
@@ -580,6 +592,11 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
                             cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
                             ssn, wdg_timeout);
 
+       /* Update the TID "owner" of the queue */
+       spin_lock_bh(&mvm->queue_info_lock);
+       mvm->queue_info[queue].txq_tid = tid;
+       spin_unlock_bh(&mvm->queue_info_lock);
+
        /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
 
        /* Redirect to lower AC */
@@ -709,7 +726,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
        if (WARN_ON(queue <= 0)) {
                IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
                        tid, cfg.sta_id);
-               return -ENOSPC;
+               return queue;
        }
 
        /*
@@ -728,7 +745,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
        if (using_inactive_queue) {
                struct iwl_scd_txq_cfg_cmd cmd = {
                        .scd_queue = queue,
-                       .enable = 0,
+                       .action = SCD_CFG_DISABLE_QUEUE,
                };
                u8 ac;
 
@@ -738,11 +755,13 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
                ac = mvm->queue_info[queue].mac80211_ac;
                cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
                cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac];
+               cmd.tid = mvm->queue_info[queue].txq_tid;
                spin_unlock_bh(&mvm->queue_info_lock);
 
                /* Disable the queue */
-               iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids,
-                                            true);
+               if (disable_agg_tids)
+                       iwl_mvm_invalidate_sta_queue(mvm, queue,
+                                                    disable_agg_tids, false);
                iwl_trans_txq_disable(mvm->trans, queue, false);
                ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
                                           &cmd);
@@ -758,6 +777,10 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 
                        return ret;
                }
+
+               /* If TXQ is allocated to another STA, update removal in FW */
+               if (cmd.sta_id != mvmsta->sta_id)
+                       iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
        }
 
        IWL_DEBUG_TX_QUEUES(mvm,
@@ -827,6 +850,119 @@ out_err:
        return ret;
 }
 
+static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
+{
+       struct iwl_scd_txq_cfg_cmd cmd = {
+               .scd_queue = queue,
+               .action = SCD_CFG_UPDATE_QUEUE_TID,
+       };
+       s8 sta_id;
+       int tid;
+       unsigned long tid_bitmap;
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       spin_lock_bh(&mvm->queue_info_lock);
+       sta_id = mvm->queue_info[queue].ra_sta_id;
+       tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+       spin_unlock_bh(&mvm->queue_info_lock);
+
+       if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
+               return;
+
+       /* Find any TID for queue */
+       tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+       cmd.tid = tid;
+       cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+       if (ret)
+               IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
+                       queue, ret);
+       else
+               IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
+                                   queue, tid);
+}
+
+static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
+{
+       struct ieee80211_sta *sta;
+       struct iwl_mvm_sta *mvmsta;
+       s8 sta_id;
+       int tid = -1;
+       unsigned long tid_bitmap;
+       unsigned int wdg_timeout;
+       int ssn;
+       int ret = true;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       spin_lock_bh(&mvm->queue_info_lock);
+       sta_id = mvm->queue_info[queue].ra_sta_id;
+       tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+       spin_unlock_bh(&mvm->queue_info_lock);
+
+       /* Find TID for queue, and make sure it is the only one on the queue */
+       tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+       if (tid_bitmap != BIT(tid)) {
+               IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
+                       queue, tid_bitmap);
+               return;
+       }
+
+       IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
+                           tid);
+
+       sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+                                       lockdep_is_held(&mvm->mutex));
+
+       if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+               return;
+
+       mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+
+       ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
+
+       ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
+                                        tid_to_mac80211_ac[tid], ssn,
+                                        wdg_timeout, true);
+       if (ret) {
+               IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
+               return;
+       }
+
+       /* If aggs should be turned back on - do it */
+       if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
+               struct iwl_mvm_add_sta_cmd cmd;
+
+               mvmsta->tid_disable_agg &= ~BIT(tid);
+
+               cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+               cmd.sta_id = mvmsta->sta_id;
+               cmd.add_modify = STA_MODE_MODIFY;
+               cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+               cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+               cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+               ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+                                          iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+               if (!ret) {
+                       IWL_DEBUG_TX_QUEUES(mvm,
+                                           "TXQ #%d is now aggregated again\n",
+                                           queue);
+
+                       /* Mark queue internally as aggregating again */
+                       iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
+               }
+       }
+
+       spin_lock_bh(&mvm->queue_info_lock);
+       mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+       spin_unlock_bh(&mvm->queue_info_lock);
+}
+
 static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
 {
        if (tid == IWL_MAX_TID_COUNT)
@@ -894,13 +1030,42 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
        unsigned long deferred_tid_traffic;
-       int sta_id, tid;
+       int queue, sta_id, tid;
 
        /* Check inactivity of queues */
        iwl_mvm_inactivity_check(mvm);
 
        mutex_lock(&mvm->mutex);
 
+       /* Reconfigure queues requiring reconfiguration */
+       for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
+               bool reconfig;
+               bool change_owner;
+
+               spin_lock_bh(&mvm->queue_info_lock);
+               reconfig = (mvm->queue_info[queue].status ==
+                           IWL_MVM_QUEUE_RECONFIGURING);
+
+               /*
+                * We need to take into account a situation in which a TXQ was
+                * allocated to TID x, and then turned shared by adding TIDs y
+                * and z. If TID x becomes inactive and is removed from the TXQ,
+                * ownership must be given to one of the remaining TIDs.
+                * This is mainly because if TID x continues - a new queue can't
+                * be allocated for it as long as it is an owner of another TXQ.
+                */
+               change_owner = !(mvm->queue_info[queue].tid_bitmap &
+                                BIT(mvm->queue_info[queue].txq_tid)) &&
+                              (mvm->queue_info[queue].status ==
+                               IWL_MVM_QUEUE_SHARED);
+               spin_unlock_bh(&mvm->queue_info_lock);
+
+               if (reconfig)
+                       iwl_mvm_unshare_queue(mvm, queue);
+               else if (change_owner)
+                       iwl_mvm_change_queue_owner(mvm, queue);
+       }
+
        /* Go over all stations with deferred traffic */
        for_each_set_bit(sta_id, mvm->sta_deferred_frames,
                         IWL_MVM_STATION_COUNT) {
@@ -963,6 +1128,61 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
        return 0;
 }
 
+/*
+ * In DQA mode, after a HW restart the queues should be allocated as before, in
+ * order to avoid race conditions when there are shared queues. This function
+ * does the re-mapping and queue allocation.
+ *
+ * Note that re-enabling aggregations isn't done in this function.
+ */
+static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
+                                                struct iwl_mvm_sta *mvm_sta)
+{
+       unsigned int wdg_timeout =
+                       iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
+       int i;
+       struct iwl_trans_txq_scd_cfg cfg = {
+               .sta_id = mvm_sta->sta_id,
+               .frame_limit = IWL_FRAME_LIMIT,
+       };
+
+       /* Make sure reserved queue is still marked as such (or allocated) */
+       mvm->queue_info[mvm_sta->reserved_queue].status =
+               IWL_MVM_QUEUE_RESERVED;
+
+       for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+               struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
+               int txq_id = tid_data->txq_id;
+               int ac;
+               u8 mac_queue;
+
+               if (txq_id == IEEE80211_INVAL_HW_QUEUE)
+                       continue;
+
+               skb_queue_head_init(&tid_data->deferred_tx_frames);
+
+               ac = tid_to_mac80211_ac[i];
+               mac_queue = mvm_sta->vif->hw_queue[ac];
+
+               cfg.tid = i;
+               cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
+               cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+                                txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+               IWL_DEBUG_TX_QUEUES(mvm,
+                                   "Re-mapping sta %d tid %d to queue %d\n",
+                                   mvm_sta->sta_id, i, txq_id);
+
+               iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
+                                  IEEE80211_SEQ_TO_SN(tid_data->seq_number),
+                                  &cfg, wdg_timeout);
+
+               mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
+       }
+
+       atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
+}
+
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                    struct ieee80211_vif *vif,
                    struct ieee80211_sta *sta)
@@ -985,6 +1205,13 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 
        spin_lock_init(&mvm_sta->lock);
 
+       /* In DQA mode, if this is a HW restart, re-alloc existing queues */
+       if (iwl_mvm_is_dqa_supported(mvm) &&
+           test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+               iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
+               goto update_fw;
+       }
+
        mvm_sta->sta_id = sta_id;
        mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
                                                      mvmvif->color);
@@ -1048,6 +1275,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                        goto err;
        }
 
+update_fw:
        ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
        if (ret)
                goto err;
@@ -1956,7 +2184,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return -EIO;
        }
 
-       spin_lock_bh(&mvm->queue_info_lock);
+       spin_lock(&mvm->queue_info_lock);
 
        /*
         * Note the possible cases:
@@ -1967,14 +2195,20 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         *      non-DQA mode, since the TXQ hasn't yet been allocated
         */
        txq_id = mvmsta->tid_data[tid].txq_id;
-       if (!iwl_mvm_is_dqa_supported(mvm) ||
+       if (iwl_mvm_is_dqa_supported(mvm) &&
+           unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
+               ret = -ENXIO;
+               IWL_DEBUG_TX_QUEUES(mvm,
+                                   "Can't start tid %d agg on shared queue!\n",
+                                   tid);
+               goto release_locks;
+       } else if (!iwl_mvm_is_dqa_supported(mvm) ||
            mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
                txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
                                                 mvm->first_agg_queue,
                                                 mvm->last_agg_queue);
                if (txq_id < 0) {
                        ret = txq_id;
-                       spin_unlock_bh(&mvm->queue_info_lock);
                        IWL_ERR(mvm, "Failed to allocate agg queue\n");
                        goto release_locks;
                }
@@ -1982,7 +2216,8 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                /* TXQ hasn't yet been enabled, so mark it only as reserved */
                mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
        }
-       spin_unlock_bh(&mvm->queue_info_lock);
+
+       spin_unlock(&mvm->queue_info_lock);
 
        IWL_DEBUG_TX_QUEUES(mvm,
                            "AGG for tid %d will be on queue #%d\n",
@@ -2006,8 +2241,11 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        }
 
        ret = 0;
+       goto out;
 
 release_locks:
+       spin_unlock(&mvm->queue_info_lock);
+out:
        spin_unlock_bh(&mvmsta->lock);
 
        return ret;
@@ -2023,6 +2261,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
        int queue, ret;
        bool alloc_queue = true;
+       enum iwl_mvm_queue_status queue_status;
        u16 ssn;
 
        struct iwl_trans_txq_scd_cfg cfg = {
@@ -2048,13 +2287,15 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
+       spin_lock_bh(&mvm->queue_info_lock);
+       queue_status = mvm->queue_info[queue].status;
+       spin_unlock_bh(&mvm->queue_info_lock);
+
        /* In DQA mode, the existing queue might need to be reconfigured */
        if (iwl_mvm_is_dqa_supported(mvm)) {
-               spin_lock_bh(&mvm->queue_info_lock);
                /* Maybe there is no need to even alloc a queue... */
                if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
                        alloc_queue = false;
-               spin_unlock_bh(&mvm->queue_info_lock);
 
                /*
                 * Only reconfig the SCD for the queue if the window size has
@@ -2089,9 +2330,12 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
                                   &cfg, wdg_timeout);
 
-       ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
-       if (ret)
-               return -EIO;
+       /* Send ADD_STA command to enable aggs only if the queue isn't shared */
+       if (queue_status != IWL_MVM_QUEUE_SHARED) {
+               ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+               if (ret)
+                       return -EIO;
+       }
 
        /* No need to mark as reserved */
        spin_lock_bh(&mvm->queue_info_lock);
@@ -2123,7 +2367,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        u16 txq_id;
        int err;
 
-
        /*
         * If mac80211 is cleaning its state, then say that we finished since
         * our state has been cleared anyway.
@@ -2152,6 +2395,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         */
        if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
                mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
+
        spin_unlock_bh(&mvm->queue_info_lock);
 
        switch (tid_data->state) {
@@ -2412,9 +2656,15 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
        struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
 
        /* verify the key details match the required command's expectations */
-       if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
-                   (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
-                   (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
+       if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
+                   (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
+                   (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
+                    keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
+                    keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
+               return -EINVAL;
+
+       if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
+                   keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
                return -EINVAL;
 
        igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
@@ -2430,11 +2680,18 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
                case WLAN_CIPHER_SUITE_AES_CMAC:
                        igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
                        break;
+               case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+               case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+                       igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
+                       break;
                default:
                        return -EINVAL;
                }
 
-               memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
+               memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
+               if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+                       igtk_cmd.ctrl_flags |=
+                               cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
                ieee80211_get_key_rx_seq(keyconf, 0, &seq);
                pn = seq.aes_cmac.pn;
                igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
@@ -2449,6 +2706,19 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
                       remove_key ? "removing" : "installing",
                       igtk_cmd.sta_id);
 
+       if (!iwl_mvm_has_new_rx_api(mvm)) {
+               struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
+                       .ctrl_flags = igtk_cmd.ctrl_flags,
+                       .key_id = igtk_cmd.key_id,
+                       .sta_id = igtk_cmd.sta_id,
+                       .receive_seq_cnt = igtk_cmd.receive_seq_cnt
+               };
+
+               memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
+                      ARRAY_SIZE(igtk_cmd_v1.igtk));
+               return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
+                                           sizeof(igtk_cmd_v1), &igtk_cmd_v1);
+       }
        return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
                                    sizeof(igtk_cmd), &igtk_cmd);
 }
@@ -2573,7 +2843,9 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
        }
        sta_id = mvm_sta->sta_id;
 
-       if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+       if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+           keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+           keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
                ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
                goto end;
        }
@@ -2659,7 +2931,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
        IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
                      keyconf->keyidx, sta_id);
 
-       if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+       if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+           keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+           keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
                return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
 
        if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
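
Taken together, the sta.c changes give shared queues a small life cycle that the add-stream worker resolves; condensed from the hunks above (status() and owner_tid_gone() stand in for the locked reads done in the worker):

    for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
            if (status(queue) == IWL_MVM_QUEUE_RECONFIGURING)
                    /* redirect to the right AC, re-enable aggregation,
                     * then mark the queue IWL_MVM_QUEUE_READY
                     */
                    iwl_mvm_unshare_queue(mvm, queue);
            else if (status(queue) == IWL_MVM_QUEUE_SHARED &&
                     owner_tid_gone(queue))
                    /* hand ownership to one of the remaining TIDs */
                    iwl_mvm_change_queue_owner(mvm, queue);
    }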
index bbc1cab..709542b 100644 (file)
@@ -554,4 +554,8 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
 
+int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+                              int ac, int ssn, unsigned int wdg_timeout,
+                              bool force);
+
 #endif /* __sta_h__ */
index c6585ab..8b91544 100644 (file)
@@ -838,6 +838,22 @@ static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
        }
 }
 
+/* Check if there are any timed-out TIDs on a given shared TXQ */
+static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
+{
+       unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
+       unsigned long now = jiffies;
+       int tid;
+
+       for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+               if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
+                               IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+                       return true;
+       }
+
+       return false;
+}
+
 /*
  * Sets the fields in the Tx cmd that are crypto related
  */
@@ -940,7 +956,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
                        iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
                        spin_unlock(&mvmsta->lock);
                        return 0;
-
                }
 
                /* If we are here - TXQ exists and needs to be re-activated */
@@ -953,8 +968,25 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
                                    txq_id);
        }
 
-       /* Keep track of the time of the last frame for this RA/TID */
-       mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
+       if (iwl_mvm_is_dqa_supported(mvm)) {
+               /* Keep track of the time of the last frame for this RA/TID */
+               mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
+
+               /*
+                * If we have timed-out TIDs - schedule the worker that will
+                * reconfig the queues and update them
+                *
+                * Note that the mvm->queue_info_lock isn't being taken here in
+                * order to not serialize the TX flow. This isn't dangerous
+                * because scheduling mvm->add_stream_wk can't ruin the state,
+                * and if we DON'T schedule it due to some race condition then
+                * next TX we get here we will.
+                */
+               if (unlikely(mvm->queue_info[txq_id].status ==
+                            IWL_MVM_QUEUE_SHARED &&
+                            iwl_mvm_txq_should_update(mvm, txq_id)))
+                       schedule_work(&mvm->add_stream_wk);
+       }
 
        IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
                     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
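
The TX hot path deliberately peeks at the shared-queue state without taking queue_info_lock and defers the heavy lifting to the worker; a TID counts as timed out once its last frame is older than IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ, per the mvm.h hunk), using the usual wrap-safe jiffies comparison:

    /* time_before(a, b) is a wrap-safe "a < b" on jiffies */
    stale = time_before(last_frame_time[tid] + IWL_MVM_DQA_QUEUE_TIMEOUT,
                        jiffies);
    if (stale)
            schedule_work(&mvm->add_stream_wk);     /* worker reconfigures the queue */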
index 68f4e7f..7c138fe 100644 (file)
@@ -610,7 +610,7 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
 {
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
-               .enable = 1,
+               .action = SCD_CFG_ENABLE_QUEUE,
                .window = frame_limit,
                .sta_id = sta_id,
                .ssn = cpu_to_le16(ssn),
@@ -669,6 +669,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
                                tid_to_mac80211_ac[cfg->tid];
                else
                        mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
+
+               mvm->queue_info[queue].txq_tid = cfg->tid;
        }
 
        IWL_DEBUG_TX_QUEUES(mvm,
@@ -682,7 +684,7 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
        if (enable_queue) {
                struct iwl_scd_txq_cfg_cmd cmd = {
                        .scd_queue = queue,
-                       .enable = 1,
+                       .action = SCD_CFG_ENABLE_QUEUE,
                        .window = cfg->frame_limit,
                        .sta_id = cfg->sta_id,
                        .ssn = cpu_to_le16(ssn),
@@ -709,7 +711,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 {
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
-               .enable = 0,
+               .action = SCD_CFG_DISABLE_QUEUE,
        };
        bool remove_mac_queue = true;
        int ret;
@@ -744,8 +746,9 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
                        ~BIT(mac80211_queue);
        mvm->queue_info[queue].hw_queue_refcount--;
 
-       cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;
-       if (!cmd.enable)
+       cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
+               SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
+       if (cmd.action == SCD_CFG_DISABLE_QUEUE)
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
 
        IWL_DEBUG_TX_QUEUES(mvm,
@@ -755,12 +758,13 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
                            mvm->queue_info[queue].hw_queue_to_mac80211);
 
        /* If the queue is still enabled - nothing left to do in this func */
-       if (cmd.enable) {
+       if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
                spin_unlock_bh(&mvm->queue_info_lock);
                return;
        }
 
        cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+       cmd.tid = mvm->queue_info[queue].txq_tid;
 
        /* Make sure queue info is correct even though we overwrite it */
        WARN(mvm->queue_info[queue].hw_queue_refcount ||
@@ -1131,7 +1135,13 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
                        BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
        }
 
-       /* TODO: if queue was shared - need to re-enable AGGs */
+       /* If the queue is marked as shared - "unshare" it */
+       if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
+           mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
+               mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
+               IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
+                                   queue);
+       }
 }
 
 void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
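
With the old 'enable' byte replaced by an action field, one command structure now covers enabling, disabling, and re-owning a queue; disabling a queue versus handing it to a new TID owner differ only in the fields below (a sketch built from the fw-api-tx.h and utils.c hunks):

    struct iwl_scd_txq_cfg_cmd disable = {
            .scd_queue = queue,
            .action    = SCD_CFG_DISABLE_QUEUE,     /* or SCD_CFG_ENABLE_QUEUE */
    };

    struct iwl_scd_txq_cfg_cmd reown = {
            .scd_queue = queue,
            .action    = SCD_CFG_UPDATE_QUEUE_TID,  /* change only the TID owner */
            .tid       = new_tid,
            .tx_fifo   = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[new_tid]],
    };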
index 78cf9a7..c6e24fb 100644 (file)
@@ -502,20 +502,27 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
 
 /* 9000 Series */
+       {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
 
 /* a000 Series */
        {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)},
@@ -608,7 +615,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
        const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
-       const struct iwl_cfg *cfg_9260lc __maybe_unused = NULL;
        struct iwl_trans *iwl_trans;
        int ret;
 
@@ -637,11 +643,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        if (iwl_trans->cfg->rf_id) {
-               if (cfg == &iwl9260_2ac_cfg)
-                       cfg_9260lc = &iwl9260lc_2ac_cfg;
-               if (cfg_9260lc && iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
-                       cfg = cfg_9260lc;
-                       iwl_trans->cfg = cfg_9260lc;
+               if (cfg == &iwl9460_2ac_cfg &&
+                   iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
+                       cfg = &iwl9000lc_2ac_cfg;
+                       iwl_trans->cfg = cfg;
                }
        }
 #endif
index 74f2f03..2f46eed 100644 (file)
@@ -827,10 +827,16 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
                if (ret)
                        return ret;
 
-               /* Notify the ucode of the loaded section number and status */
-               val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
-               val = val | (sec_num << shift_param);
-               iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+               /* Notify ucode of loaded section number and status */
+               if (trans->cfg->use_tfh) {
+                       val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS);
+                       val = val | (sec_num << shift_param);
+                       iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val);
+               } else {
+                       val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
+                       val = val | (sec_num << shift_param);
+                       iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+               }
                sec_num = (sec_num << 1) | 0x1;
        }
 
@@ -838,10 +844,21 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
 
        iwl_enable_interrupts(trans);
 
-       if (cpu == 1)
-               iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
-       else
-               iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
+       if (trans->cfg->use_tfh) {
+               if (cpu == 1)
+                       iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
+                                      0xFFFF);
+               else
+                       iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
+                                      0xFFFFFFFF);
+       } else {
+               if (cpu == 1)
+                       iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
+                                          0xFFFF);
+               else
+                       iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
+                                          0xFFFFFFFF);
+       }
 
        return 0;
 }
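
The two hunks above repeat the same use_tfh decision: on TFH-based devices the ucode load status is reported through the UREG_UCODE_LOAD_STATUS peripheral register instead of FH_UCODE_LOAD_STATUS. As a minimal kernel-context sketch (not a drop-in; the helper name below is made up, the register accessors and fields are the ones used in the hunks), the branch could be factored into one place:

static void demo_notify_ucode_load_status(struct iwl_trans *trans, u32 val)
{
	/* TFH-based devices expose the load status via a prph register */
	if (trans->cfg->use_tfh)
		iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val);
	else
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
}

The per-section status update and the final 0xFFFF/0xFFFFFFFF writes would then all go through the same helper.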
@@ -886,14 +903,6 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
                        return ret;
        }
 
-       if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
-               iwl_set_bits_prph(trans,
-                                 CSR_UCODE_LOAD_STATUS_ADDR,
-                                 (LMPM_CPU_UCODE_LOADING_COMPLETED |
-                                  LMPM_CPU_HDRS_LOADING_COMPLETED |
-                                  LMPM_CPU_UCODE_LOADING_STARTED) <<
-                                       shift_param);
-
        *first_ucode_section = last_read_idx;
 
        return 0;
@@ -1960,6 +1969,10 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
        IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
                txq->q.read_ptr, txq->q.write_ptr);
 
+       if (trans->cfg->use_tfh)
+               /* TODO: access new SCD registers and dump them */
+               return;
+
        scd_sram_addr = trans_pcie->scd_base_addr +
                        SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
        iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
index 18650dc..9636dc8 100644 (file)
@@ -703,6 +703,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
        memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
        memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
 
+       if (trans->cfg->use_tfh)
+               return;
+
        trans_pcie->scd_base_addr =
                iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
 
@@ -970,11 +973,13 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
                }
        }
 
-       if (trans->cfg->use_tfh)
+       if (trans->cfg->use_tfh) {
                iwl_write_direct32(trans, TFH_TRANSFER_MODE,
                                   TFH_TRANSFER_MAX_PENDING_REQ |
                                   TFH_CHUNK_SIZE_128 |
                                   TFH_CHUNK_SPLIT_MODE);
+               return 0;
+       }
 
        iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
        if (trans->cfg->base_params->num_of_queues > 20)
@@ -1249,6 +1254,9 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
        if (test_and_set_bit(txq_id, trans_pcie->queue_used))
                WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
 
+       if (cfg && trans->cfg->use_tfh)
+               WARN_ONCE(1, "Expected no calls to SCD configuration");
+
        txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
 
        if (cfg) {
@@ -1366,6 +1374,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
                return;
        }
 
+       if (configure_scd && trans->cfg->use_tfh)
+               WARN_ONCE(1, "Expected no calls to SCD configuration");
+
        if (configure_scd) {
                iwl_scd_txq_set_inactive(trans, txq_id);
 
index 3e5fa78..a5656bc 100644 (file)
@@ -3041,13 +3041,9 @@ static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p)
            p->length > 1024 || !p->pointer)
                return -EINVAL;
 
-       param = kmalloc(p->length, GFP_KERNEL);
-       if (param == NULL)
-               return -ENOMEM;
-
-       if (copy_from_user(param, p->pointer, p->length)) {
-               ret = -EFAULT;
-               goto out;
+       param = memdup_user(p->pointer, p->length);
+       if (IS_ERR(param)) {
+               return PTR_ERR(param);
        }
 
        if (p->length < sizeof(struct prism2_download_param) +
@@ -3803,13 +3799,9 @@ static int prism2_ioctl_priv_hostapd(local_info_t *local, struct iw_point *p)
            p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
                return -EINVAL;
 
-       param = kmalloc(p->length, GFP_KERNEL);
-       if (param == NULL)
-               return -ENOMEM;
-
-       if (copy_from_user(param, p->pointer, p->length)) {
-               ret = -EFAULT;
-               goto out;
+       param = memdup_user(p->pointer, p->length);
+       if (IS_ERR(param)) {
+               return PTR_ERR(param);
        }
 
        switch (param->cmd) {
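
Both conversions above follow the standard memdup_user() pattern: the kmalloc() plus copy_from_user() pair collapses into one call that returns an ERR_PTR()-encoded error instead of NULL. A minimal kernel-context sketch of the pattern (the function and variable names below are illustrative, not part of the driver):

static int demo_copy_ioctl_buf(void __user *uptr, size_t len)
{
	u8 *buf;

	buf = memdup_user(uptr, len);	/* kmalloc() + copy_from_user() */
	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... validate and use buf ... */

	kfree(buf);
	return 0;
}

The IS_ERR()/PTR_ERR() pair preserves the distinction between -ENOMEM and -EFAULT without a separate NULL check.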
index 81c60d0..43dccd5 100644 (file)
@@ -260,22 +260,17 @@ int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv,
 
        rdr_event = (void *)(skb->data + sizeof(u32));
 
-       if (le32_to_cpu(rdr_event->passed)) {
-               mwifiex_dbg(priv->adapter, MSG,
-                           "radar detected; indicating kernel\n");
-               if (mwifiex_stop_radar_detection(priv, &priv->dfs_chandef))
-                       mwifiex_dbg(priv->adapter, ERROR,
-                                   "Failed to stop CAC in FW\n");
-               cfg80211_radar_event(priv->adapter->wiphy, &priv->dfs_chandef,
-                                    GFP_KERNEL);
-               mwifiex_dbg(priv->adapter, MSG, "regdomain: %d\n",
-                           rdr_event->reg_domain);
-               mwifiex_dbg(priv->adapter, MSG, "radar detection type: %d\n",
-                           rdr_event->det_type);
-       } else {
-               mwifiex_dbg(priv->adapter, MSG,
-                           "false radar detection event!\n");
-       }
+       mwifiex_dbg(priv->adapter, MSG,
+                   "radar detected; indicating kernel\n");
+       if (mwifiex_stop_radar_detection(priv, &priv->dfs_chandef))
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Failed to stop CAC in FW\n");
+       cfg80211_radar_event(priv->adapter->wiphy, &priv->dfs_chandef,
+                            GFP_KERNEL);
+       mwifiex_dbg(priv->adapter, MSG, "regdomain: %d\n",
+                   rdr_event->reg_domain);
+       mwifiex_dbg(priv->adapter, MSG, "radar detection type: %d\n",
+                   rdr_event->det_type);
 
        return 0;
 }
index afdd58a..ea0fa68 100644 (file)
@@ -171,9 +171,10 @@ mwifiex_find_stream_to_delete(struct mwifiex_private *priv, int ptr_tid,
 static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
                                             struct mwifiex_sta_node *node)
 {
-
-       if (!node || (priv->bss_role != MWIFIEX_BSS_ROLE_UAP) ||
-           !priv->ap_11n_enabled)
+       if (!node || ((priv->bss_role == MWIFIEX_BSS_ROLE_UAP) &&
+                     !priv->ap_11n_enabled) ||
+           ((priv->bss_mode == NL80211_IFTYPE_ADHOC) &&
+            !priv->adapter->adhoc_11n_enabled))
                return 0;
 
        return node->is_11n_enabled;
index dc49c3d..c47d636 100644 (file)
@@ -205,7 +205,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
 
        do {
                /* Check if AMSDU can accommodate this MSDU */
-               if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
+               if ((skb_aggr->len + skb_src->len + LLC_SNAP_LEN) >
+                   adapter->tx_buf_size)
                        break;
 
                skb_src = skb_dequeue(&pra_list->skb_head);
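
The hunk above changes the A-MSDU fit test: instead of checking how much tailroom the aggregate skb still has, it bounds the total aggregate length by the adapter's TX buffer size. A self-contained sketch of that predicate, assuming LLC_SNAP_LEN is the 8-byte LLC/SNAP header added per aggregated MSDU (value taken on trust from the driver):

#include <stdbool.h>
#include <stddef.h>

#define LLC_SNAP_LEN 8	/* assumed LLC/SNAP header length per aggregated MSDU */

/* True when appending one more MSDU keeps the A-MSDU within the TX buffer. */
static bool msdu_fits(size_t aggr_len, size_t msdu_len, size_t tx_buf_size)
{
	return aggr_len + msdu_len + LLC_SNAP_LEN <= tx_buf_size;
}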
index a74cc43..9448012 100644 (file)
@@ -78,8 +78,15 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
  */
 static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
 {
-       int ret = mwifiex_11n_dispatch_amsdu_pkt(priv, payload);
 
+       int ret;
+
+       if (!payload) {
+               mwifiex_dbg(priv->adapter, INFO, "info: fw drop data\n");
+               return 0;
+       }
+
+       ret = mwifiex_11n_dispatch_amsdu_pkt(priv, payload);
        if (!ret)
                return 0;
 
@@ -921,3 +928,72 @@ void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter)
        else
                mwifiex_update_ampdu_rxwinsize(adapter, false);
 }
+
+/* This function handles rxba_sync event
+ */
+void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
+                                u8 *event_buf, u16 len)
+{
+       struct mwifiex_ie_types_rxba_sync *tlv_rxba = (void *)event_buf;
+       u16 tlv_type, tlv_len;
+       struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
+       u8 i, j;
+       u16 seq_num, tlv_seq_num, tlv_bitmap_len;
+       int tlv_buf_left = len;
+       int ret;
+       u8 *tmp;
+
+       mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
+                        event_buf, len);
+       while (tlv_buf_left >= sizeof(*tlv_rxba)) {
+               tlv_type = le16_to_cpu(tlv_rxba->header.type);
+               tlv_len  = le16_to_cpu(tlv_rxba->header.len);
+               if (tlv_type != TLV_TYPE_RXBA_SYNC) {
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "Wrong TLV id=0x%x\n", tlv_type);
+                       return;
+               }
+
+               tlv_seq_num = le16_to_cpu(tlv_rxba->seq_num);
+               tlv_bitmap_len = le16_to_cpu(tlv_rxba->bitmap_len);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "%pM tid=%d seq_num=%d bitmap_len=%d\n",
+                           tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
+                           tlv_bitmap_len);
+
+               rx_reor_tbl_ptr =
+                       mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
+                                                      tlv_rxba->mac);
+               if (!rx_reor_tbl_ptr) {
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "Cannot find rx_reorder_tbl!");
+                       return;
+               }
+
+               for (i = 0; i < tlv_bitmap_len; i++) {
+                       for (j = 0 ; j < 8; j++) {
+                               if (tlv_rxba->bitmap[i] & (1 << j)) {
+                                       seq_num = (MAX_TID_VALUE - 1) &
+                                               (tlv_seq_num + i * 8 + j);
+
+                                       mwifiex_dbg(priv->adapter, ERROR,
+                                                   "drop packet, seq=%d\n",
+                                                   seq_num);
+
+                                       ret = mwifiex_11n_rx_reorder_pkt(priv,
+                                               seq_num, tlv_rxba->tid,
+                                               tlv_rxba->mac, 0, NULL);
+
+                                       if (ret)
+                                               mwifiex_dbg(priv->adapter,
+                                                           ERROR,
+                                                           "Failed to drop packet");
+                               }
+                       }
+               }
+
+               tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
+               tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
+               tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
+       }
+}
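
In the handler above, each set bit of the RXBA_SYNC bitmap names one frame, offset from the TLV's starting sequence number, that the firmware has already flushed; the handler converts (byte i, bit j) to an absolute sequence number modulo the 802.11 sequence space and drops it from the reorder table. A self-contained sketch of just that index arithmetic (MAX_TID_VALUE assumed to be 4096, as in the driver):

#include <stdio.h>

#define MAX_TID_VALUE 4096	/* assumed 802.11 sequence-number space */

static unsigned int rxba_bit_to_seq(unsigned int start_seq,
				    unsigned int i, unsigned int j)
{
	/* Same wrap-around the handler applies: (MAX_TID_VALUE - 1) & ... */
	return (MAX_TID_VALUE - 1) & (start_seq + i * 8 + j);
}

int main(void)
{
	/* byte 0, bit 7 of a bitmap starting at seq 4090 -> seq 1 (wrapped) */
	printf("%u\n", rxba_bit_to_seq(4090, 0, 7));
	return 0;
}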
index 63ecea8..22d991f 100644 (file)
@@ -81,5 +81,6 @@ struct mwifiex_rx_reorder_tbl *
 mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta);
 void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta);
 void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags);
-
+void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
+                                u8 *event_buf, u16 len);
 #endif /* _MWIFIEX_11N_RXREORDER_H_ */
index a8ff969..c7f2faa 100644 (file)
@@ -2012,10 +2012,6 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
        if (mwifiex_deauthenticate(priv, NULL))
                return -EFAULT;
 
-       mwifiex_dbg(priv->adapter, MSG,
-                   "info: successfully disconnected from %pM:\t"
-                   "reason code %d\n", priv->cfg_bssid, reason_code);
-
        eth_zero_addr(priv->cfg_bssid);
        priv->hs2_enabled = false;
 
@@ -2485,6 +2481,16 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
 
        priv->scan_request = request;
 
+       if (request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+               ether_addr_copy(priv->random_mac, request->mac_addr);
+               for (i = 0; i < ETH_ALEN; i++) {
+                       priv->random_mac[i] &= request->mac_addr_mask[i];
+                       priv->random_mac[i] |= get_random_int() &
+                                              ~(request->mac_addr_mask[i]);
+               }
+       }
+
+       ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
        user_scan_cfg->num_ssids = request->n_ssids;
        user_scan_cfg->ssid_list = request->ssids;
 
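
The scan hunk above builds the randomized source address the way NL80211_SCAN_FLAG_RANDOM_ADDR expects: bits covered by mac_addr_mask are kept from mac_addr and the rest are randomized. A self-contained sketch of the per-byte operation (rand() stands in for the kernel's get_random_int(); names and values are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ETH_ALEN 6

/* Keep the bits selected by mask, randomize the rest. */
static void randomize_mac(uint8_t out[ETH_ALEN], const uint8_t addr[ETH_ALEN],
			  const uint8_t mask[ETH_ALEN])
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		out[i] = (addr[i] & mask[i]) | ((uint8_t)rand() & ~mask[i]);
}

int main(void)
{
	const uint8_t addr[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x00, 0x00, 0x00 };
	const uint8_t mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };
	uint8_t out[ETH_ALEN];

	randomize_mac(out, addr, mask);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       out[0], out[1], out[2], out[3], out[4], out[5]);
	return 0;
}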
@@ -2726,7 +2732,7 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
                ht_info->cap &= ~IEEE80211_HT_CAP_SGI_40;
 
        if (adapter->user_dev_mcs_support == HT_STREAM_2X2)
-               ht_info->cap |= 3 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
+               ht_info->cap |= 2 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
        else
                ht_info->cap |= 1 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
 
@@ -3913,6 +3919,88 @@ static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy,
        return ret;
 }
 
+#ifdef CONFIG_NL80211_TESTMODE
+
+enum mwifiex_tm_attr {
+       __MWIFIEX_TM_ATTR_INVALID       = 0,
+       MWIFIEX_TM_ATTR_CMD             = 1,
+       MWIFIEX_TM_ATTR_DATA            = 2,
+
+       /* keep last */
+       __MWIFIEX_TM_ATTR_AFTER_LAST,
+       MWIFIEX_TM_ATTR_MAX             = __MWIFIEX_TM_ATTR_AFTER_LAST - 1,
+};
+
+static const struct nla_policy mwifiex_tm_policy[MWIFIEX_TM_ATTR_MAX + 1] = {
+       [MWIFIEX_TM_ATTR_CMD]           = { .type = NLA_U32 },
+       [MWIFIEX_TM_ATTR_DATA]          = { .type = NLA_BINARY,
+                                           .len = MWIFIEX_SIZE_OF_CMD_BUFFER },
+};
+
+enum mwifiex_tm_command {
+       MWIFIEX_TM_CMD_HOSTCMD  = 0,
+};
+
+static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
+                         void *data, int len)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+       struct mwifiex_ds_misc_cmd *hostcmd;
+       struct nlattr *tb[MWIFIEX_TM_ATTR_MAX + 1];
+       struct mwifiex_adapter *adapter;
+       struct sk_buff *skb;
+       int err;
+
+       if (!priv)
+               return -EINVAL;
+       adapter = priv->adapter;
+
+       err = nla_parse(tb, MWIFIEX_TM_ATTR_MAX, data, len,
+                       mwifiex_tm_policy);
+       if (err)
+               return err;
+
+       if (!tb[MWIFIEX_TM_ATTR_CMD])
+               return -EINVAL;
+
+       switch (nla_get_u32(tb[MWIFIEX_TM_ATTR_CMD])) {
+       case MWIFIEX_TM_CMD_HOSTCMD:
+               if (!tb[MWIFIEX_TM_ATTR_DATA])
+                       return -EINVAL;
+
+               hostcmd = kzalloc(sizeof(*hostcmd), GFP_KERNEL);
+               if (!hostcmd)
+                       return -ENOMEM;
+
+               hostcmd->len = nla_len(tb[MWIFIEX_TM_ATTR_DATA]);
+               memcpy(hostcmd->cmd, nla_data(tb[MWIFIEX_TM_ATTR_DATA]),
+                      hostcmd->len);
+
+               if (mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, true)) {
+                       dev_err(priv->adapter->dev, "Failed to process hostcmd\n");
+                       return -EFAULT;
+               }
+
+               /* process hostcmd response */
+               skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len);
+               if (!skb)
+                       return -ENOMEM;
+               err = nla_put(skb, MWIFIEX_TM_ATTR_DATA,
+                             hostcmd->len, hostcmd->cmd);
+               if (err) {
+                       kfree_skb(skb);
+                       return -EMSGSIZE;
+               }
+
+               err = cfg80211_testmode_reply(skb);
+               kfree(hostcmd);
+               return err;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+#endif
+
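
The testmode hook above is the usual cfg80211 vendor-test flow: nla_parse() validates the attributes against mwifiex_tm_policy, MWIFIEX_TM_CMD_HOSTCMD wraps the payload into a mwifiex_ds_misc_cmd, and the firmware response comes back through a testmode reply skb. Below is a condensed kernel-context sketch of that path; it is not a drop-in replacement (the function name is made up and the sanity checks are trimmed), and note that it frees hostcmd on the error returns, which the hunk above does not do on its -EFAULT and -ENOMEM paths:

static int demo_tm_hostcmd(struct wiphy *wiphy, struct mwifiex_private *priv,
			   void *data, int len)
{
	struct nlattr *tb[MWIFIEX_TM_ATTR_MAX + 1];
	struct mwifiex_ds_misc_cmd *hostcmd;
	struct sk_buff *skb;
	int err;

	err = nla_parse(tb, MWIFIEX_TM_ATTR_MAX, data, len, mwifiex_tm_policy);
	if (err)
		return err;
	if (!tb[MWIFIEX_TM_ATTR_CMD] || !tb[MWIFIEX_TM_ATTR_DATA])
		return -EINVAL;

	hostcmd = kzalloc(sizeof(*hostcmd), GFP_KERNEL);
	if (!hostcmd)
		return -ENOMEM;
	hostcmd->len = nla_len(tb[MWIFIEX_TM_ATTR_DATA]);
	memcpy(hostcmd->cmd, nla_data(tb[MWIFIEX_TM_ATTR_DATA]), hostcmd->len);

	if (mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, true)) {
		err = -EFAULT;
		goto free;
	}

	skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len);
	if (!skb) {
		err = -ENOMEM;
		goto free;
	}
	err = nla_put(skb, MWIFIEX_TM_ATTR_DATA, hostcmd->len, hostcmd->cmd);
	if (err) {
		kfree_skb(skb);
		goto free;
	}
	err = cfg80211_testmode_reply(skb);
free:
	kfree(hostcmd);
	return err;
}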
 static int
 mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy,
                                       struct net_device *dev,
@@ -4025,6 +4113,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
        .tdls_cancel_channel_switch = mwifiex_cfg80211_tdls_cancel_chan_switch,
        .add_station = mwifiex_cfg80211_add_station,
        .change_station = mwifiex_cfg80211_change_station,
+       CFG80211_TESTMODE_CMD(mwifiex_tm_cmd)
        .get_channel = mwifiex_cfg80211_get_channel,
        .start_radar_detection = mwifiex_cfg80211_start_radar_detection,
        .channel_switch = mwifiex_cfg80211_channel_switch,
@@ -4135,9 +4224,12 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
        wiphy->cipher_suites = mwifiex_cipher_suites;
        wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites);
 
-       if (adapter->region_code)
-               wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS |
+       if (adapter->regd) {
+               wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+                                          REGULATORY_DISABLE_BEACON_HINTS |
                                           REGULATORY_COUNTRY_IE_IGNORE;
+               wiphy_apply_custom_regulatory(wiphy, adapter->regd);
+       }
 
        ether_addr_copy(wiphy->perm_addr, adapter->perm_addr);
        wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
@@ -4173,7 +4265,10 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
        wiphy->features |= NL80211_FEATURE_HT_IBSS |
                           NL80211_FEATURE_INACTIVITY_TIMER |
                           NL80211_FEATURE_LOW_PRIORITY_SCAN |
-                          NL80211_FEATURE_NEED_OBSS_SCAN;
+                          NL80211_FEATURE_NEED_OBSS_SCAN |
+                          NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+                          NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+                          NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
 
        if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
                wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
@@ -4200,19 +4295,27 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
                return ret;
        }
 
-       if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) {
-               mwifiex_dbg(adapter, INFO,
-                           "driver hint alpha2: %2.2s\n", reg_alpha2);
-               regulatory_hint(wiphy, reg_alpha2);
-       } else {
-               if (adapter->region_code == 0x00) {
-                       mwifiex_dbg(adapter, WARN, "Ignore world regulatory domain\n");
+       if (!adapter->regd) {
+               if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) {
+                       mwifiex_dbg(adapter, INFO,
+                                   "driver hint alpha2: %2.2s\n", reg_alpha2);
+                       regulatory_hint(wiphy, reg_alpha2);
                } else {
-                       country_code =
-                               mwifiex_11d_code_2_region(adapter->region_code);
-                       if (country_code &&
-                           regulatory_hint(wiphy, country_code))
-                               mwifiex_dbg(priv->adapter, ERROR, "regulatory_hint() failed\n");
+                       if (adapter->region_code == 0x00) {
+                               mwifiex_dbg(adapter, WARN,
+                                           "Ignore world regulatory domain\n");
+                       } else {
+                               wiphy->regulatory_flags |=
+                                       REGULATORY_DISABLE_BEACON_HINTS |
+                                       REGULATORY_COUNTRY_IE_IGNORE;
+                               country_code =
+                                       mwifiex_11d_code_2_region(
+                                               adapter->region_code);
+                               if (country_code &&
+                                   regulatory_hint(wiphy, country_code))
+                                       mwifiex_dbg(priv->adapter, ERROR,
+                                                   "regulatory_hint() failed\n");
+                       }
                }
        }
 
index c29f26d..5347728 100644 (file)
@@ -480,13 +480,27 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
  */
 int mwifiex_process_event(struct mwifiex_adapter *adapter)
 {
-       int ret;
+       int ret, i;
        struct mwifiex_private *priv =
                mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
        struct sk_buff *skb = adapter->event_skb;
-       u32 eventcause = adapter->event_cause;
+       u32 eventcause;
        struct mwifiex_rxinfo *rx_info;
 
+       if ((adapter->event_cause & EVENT_ID_MASK) == EVENT_RADAR_DETECTED) {
+               for (i = 0; i < adapter->priv_num; i++) {
+                       priv = adapter->priv[i];
+                       if (priv && mwifiex_is_11h_active(priv)) {
+                               adapter->event_cause |=
+                                       ((priv->bss_num & 0xff) << 16) |
+                                       ((priv->bss_type & 0xff) << 24);
+                               break;
+                       }
+               }
+       }
+
+       eventcause = adapter->event_cause;
+
        /* Save the last event to debug log */
        adapter->dbg.last_event_index =
                        (adapter->dbg.last_event_index + 1) % DBG_CMD_NUM;
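
For the radar-detected event, the hunk above tags the event cause with the interface that currently has 11h active by packing bss_num into bits 16-23 and bss_type into bits 24-31 on top of the 16-bit event ID. A self-contained sketch of that packing and the matching unpack (EVENT_ID_MASK assumed to be 0xffff; the event ID below is only illustrative):

#include <stdint.h>
#include <stdio.h>

#define EVENT_ID_MASK 0xffffu	/* assumed: low 16 bits carry the event ID */

static uint32_t tag_event_with_iface(uint32_t event_cause,
				     uint8_t bss_num, uint8_t bss_type)
{
	return event_cause | ((uint32_t)bss_num << 16) |
	       ((uint32_t)bss_type << 24);
}

int main(void)
{
	uint32_t cause = tag_event_with_iface(0x0053, 1, 3); /* illustrative ID */

	printf("event=0x%04x bss_num=%u bss_type=%u\n",
	       (unsigned int)(cause & EVENT_ID_MASK),
	       (unsigned int)((cause >> 16) & 0xff),
	       (unsigned int)((cause >> 24) & 0xff));
	return 0;
}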
@@ -581,6 +595,14 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
                        return -1;
                }
        }
+       /* We don't expect commands in manufacturing mode; they are prepared
+        * in the application and passed as a ready-to-download buffer.
+        */
+       if (adapter->mfg_mode && cmd_no) {
+               dev_dbg(adapter->dev, "Ignoring commands in manufacturing mode\n");
+               return -1;
+       }
+
 
        /* Get a new command node */
        cmd_node = mwifiex_get_cmd_node(adapter);
index bccf17a..b9284b5 100644 (file)
@@ -118,6 +118,8 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
                p += sprintf(p, "bssid=\"%pM\"\n", info.bssid);
                p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
                p += sprintf(p, "country_code = \"%s\"\n", info.country_code);
+               p += sprintf(p, "region_code=\"0x%x\"\n",
+                            priv->adapter->region_code);
 
                netdev_for_each_mc_addr(ha, netdev)
                        p += sprintf(p, "multicast_address[%d]=\"%pM\"\n",
index 5596b6b..18aa525 100644 (file)
@@ -176,6 +176,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_PWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 145)
 #define TLV_TYPE_GWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 146)
 #define TLV_TYPE_TX_PAUSE           (PROPRIETARY_TLV_BASE_ID + 148)
+#define TLV_TYPE_RXBA_SYNC          (PROPRIETARY_TLV_BASE_ID + 153)
 #define TLV_TYPE_COALESCE_RULE      (PROPRIETARY_TLV_BASE_ID + 154)
 #define TLV_TYPE_KEY_PARAM_V2       (PROPRIETARY_TLV_BASE_ID + 156)
 #define TLV_TYPE_REPEAT_COUNT       (PROPRIETARY_TLV_BASE_ID + 176)
@@ -188,6 +189,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_BTCOEX_WL_AGGR_WINSIZE  (PROPRIETARY_TLV_BASE_ID + 202)
 #define TLV_BTCOEX_WL_SCANTIME      (PROPRIETARY_TLV_BASE_ID + 203)
 #define TLV_TYPE_BSS_MODE           (PROPRIETARY_TLV_BASE_ID + 206)
+#define TLV_TYPE_RANDOM_MAC         (PROPRIETARY_TLV_BASE_ID + 236)
+#define TLV_TYPE_CHAN_ATTR_CFG      (PROPRIETARY_TLV_BASE_ID + 237)
 
 #define MWIFIEX_TX_DATA_BUF_SIZE_2K        2048
 
@@ -208,6 +211,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 
 #define MWIFIEX_TX_DATA_BUF_SIZE_4K        4096
 #define MWIFIEX_TX_DATA_BUF_SIZE_8K        8192
+#define MWIFIEX_TX_DATA_BUF_SIZE_12K       12288
 
 #define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
 #define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
@@ -379,6 +383,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_MC_POLICY                         0x0121
 #define HostCmd_CMD_TDLS_OPER                         0x0122
 #define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG               0x0223
+#define HostCmd_CMD_CHAN_REGION_CFG                  0x0242
 
 #define PROTOCOL_NO_SECURITY        0x01
 #define PROTOCOL_STATIC_WEP         0x02
@@ -411,6 +416,14 @@ enum P2P_MODES {
        P2P_MODE_CLIENT = 3,
 };
 
+enum mwifiex_channel_flags {
+       MWIFIEX_CHANNEL_PASSIVE = BIT(0),
+       MWIFIEX_CHANNEL_DFS = BIT(1),
+       MWIFIEX_CHANNEL_NOHT40 = BIT(2),
+       MWIFIEX_CHANNEL_NOHT80 = BIT(3),
+       MWIFIEX_CHANNEL_DISABLED = BIT(7),
+};
+
 #define HostCmd_RET_BIT                       0x8000
 #define HostCmd_ACT_GEN_GET                   0x0000
 #define HostCmd_ACT_GEN_SET                   0x0001
@@ -504,6 +517,8 @@ enum P2P_MODES {
 #define EVENT_RSSI_HIGH                 0x0000001c
 #define EVENT_SNR_HIGH                  0x0000001d
 #define EVENT_IBSS_COALESCED            0x0000001e
+#define EVENT_IBSS_STA_CONNECT          0x00000020
+#define EVENT_IBSS_STA_DISCONNECT       0x00000021
 #define EVENT_DATA_RSSI_LOW             0x00000024
 #define EVENT_DATA_SNR_LOW              0x00000025
 #define EVENT_DATA_RSSI_HIGH            0x00000026
@@ -531,6 +546,7 @@ enum P2P_MODES {
 #define EVENT_CHANNEL_REPORT_RDY        0x00000054
 #define EVENT_TX_DATA_PAUSE             0x00000055
 #define EVENT_EXT_SCAN_REPORT           0x00000058
+#define EVENT_RXBA_SYNC                 0x00000059
 #define EVENT_BG_SCAN_STOPPED           0x00000065
 #define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
 #define EVENT_MULTI_CHAN_INFO           0x0000006a
@@ -734,6 +750,16 @@ struct mwifiex_ie_types_chan_list_param_set {
        struct mwifiex_chan_scan_param_set chan_scan_param[1];
 } __packed;
 
+struct mwifiex_ie_types_rxba_sync {
+       struct mwifiex_ie_types_header header;
+       u8 mac[ETH_ALEN];
+       u8 tid;
+       u8 reserved;
+       __le16 seq_num;
+       __le16 bitmap_len;
+       u8 bitmap[1];
+} __packed;
+
 struct chan_band_param_set {
        u8 radio_type;
        u8 chan_number;
@@ -780,6 +806,11 @@ struct mwifiex_ie_types_scan_chan_gap {
        __le16 chan_gap;
 } __packed;
 
+struct mwifiex_ie_types_random_mac {
+       struct mwifiex_ie_types_header header;
+       u8 mac[ETH_ALEN];
+} __packed;
+
 struct mwifiex_ietypes_chanstats {
        struct mwifiex_ie_types_header header;
        struct mwifiex_fw_chan_stats chanstats[0];
@@ -1464,6 +1495,7 @@ struct mwifiex_user_scan_cfg {
        /* Variable number (fixed maximum) of channels to scan up */
        struct mwifiex_user_scan_chan chan_list[MWIFIEX_USER_SCAN_CHAN_MAX];
        u16 scan_chan_gap;
+       u8 random_mac[ETH_ALEN];
 } __packed;
 
 #define MWIFIEX_BG_SCAN_CHAN_MAX 38
@@ -1646,7 +1678,7 @@ struct mwifiex_ie_types_sta_info {
 };
 
 struct host_cmd_ds_sta_list {
-       u16 sta_count;
+       __le16 sta_count;
        u8 tlv[0];
 } __packed;
 
@@ -1667,6 +1699,12 @@ struct mwifiex_ie_types_wmm_param_set {
        u8 wmm_ie[1];
 };
 
+struct mwifiex_ie_types_mgmt_frame {
+       struct mwifiex_ie_types_header header;
+       __le16 frame_control;
+       u8 frame_contents[0];
+};
+
 struct mwifiex_ie_types_wmm_queue_status {
        struct mwifiex_ie_types_header header;
        u8 queue_index;
@@ -2034,26 +2072,26 @@ struct host_cmd_ds_set_bss_mode {
 
 struct host_cmd_ds_pcie_details {
        /* TX buffer descriptor ring address */
-       u32 txbd_addr_lo;
-       u32 txbd_addr_hi;
+       __le32 txbd_addr_lo;
+       __le32 txbd_addr_hi;
        /* TX buffer descriptor ring count */
-       u32 txbd_count;
+       __le32 txbd_count;
 
        /* RX buffer descriptor ring address */
-       u32 rxbd_addr_lo;
-       u32 rxbd_addr_hi;
+       __le32 rxbd_addr_lo;
+       __le32 rxbd_addr_hi;
        /* RX buffer descriptor ring count */
-       u32 rxbd_count;
+       __le32 rxbd_count;
 
        /* Event buffer descriptor ring address */
-       u32 evtbd_addr_lo;
-       u32 evtbd_addr_hi;
+       __le32 evtbd_addr_lo;
+       __le32 evtbd_addr_hi;
        /* Event buffer descriptor ring count */
-       u32 evtbd_count;
+       __le32 evtbd_count;
 
        /* Sleep cookie buffer physical address */
-       u32 sleep_cookie_addr_lo;
-       u32 sleep_cookie_addr_hi;
+       __le32 sleep_cookie_addr_lo;
+       __le32 sleep_cookie_addr_hi;
 } __packed;
 
 struct mwifiex_ie_types_rssi_threshold {
@@ -2093,8 +2131,8 @@ struct mwifiex_ie_types_mc_group_info {
        u8 chan_buf_weight;
        u8 band_config;
        u8 chan_num;
-       u32 chan_time;
-       u32 reserved;
+       __le32 chan_time;
+       __le32 reserved;
        union {
                u8 sdio_func_num;
                u8 usb_ep_num;
@@ -2185,7 +2223,7 @@ struct host_cmd_ds_robust_coex {
 } __packed;
 
 struct host_cmd_ds_wakeup_reason {
-       u16  wakeup_reason;
+       __le16  wakeup_reason;
 } __packed;
 
 struct host_cmd_ds_gtk_rekey_params {
@@ -2196,6 +2234,10 @@ struct host_cmd_ds_gtk_rekey_params {
        __le32 replay_ctr_high;
 } __packed;
 
+struct host_cmd_ds_chan_region_cfg {
+       __le16 action;
+} __packed;
+
 struct host_cmd_ds_command {
        __le16 command;
        __le16 size;
@@ -2270,6 +2312,7 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_robust_coex coex;
                struct host_cmd_ds_wakeup_reason hs_wakeup_reason;
                struct host_cmd_ds_gtk_rekey_params rekey;
+               struct host_cmd_ds_chan_region_cfg reg_cfg;
        } params;
 } __packed;
 
index 1489c90..82839d9 100644 (file)
@@ -298,6 +298,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
        memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
        adapter->arp_filter_size = 0;
        adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
+       adapter->mfg_mode = mfg_mode;
        adapter->key_api_major_ver = 0;
        adapter->key_api_minor_ver = 0;
        eth_broadcast_addr(adapter->perm_addr);
@@ -553,15 +554,22 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
                                return -1;
                }
        }
+       if (adapter->mfg_mode) {
+               adapter->hw_status = MWIFIEX_HW_STATUS_READY;
+               ret = -EINPROGRESS;
+       } else {
+               for (i = 0; i < adapter->priv_num; i++) {
+                       if (adapter->priv[i]) {
+                               ret = mwifiex_sta_init_cmd(adapter->priv[i],
+                                                          first_sta, true);
+                               if (ret == -1)
+                                       return -1;
+
+                               first_sta = false;
+                       }
+
 
-       for (i = 0; i < adapter->priv_num; i++) {
-               if (adapter->priv[i]) {
-                       ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta,
-                                                  true);
-                       if (ret == -1)
-                               return -1;
 
-                       first_sta = false;
                }
        }
 
index 1c7b006..b89596c 100644 (file)
@@ -669,9 +669,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
        priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
                                   sizeof(priv->assoc_rsp_buf));
 
-       memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
-
        assoc_rsp->a_id = cpu_to_le16(aid);
+       memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
 
        if (status_code) {
                priv->adapter->dbg.num_cmd_assoc_failure++;
index db4925d..9b2e98c 100644 (file)
@@ -23,6 +23,7 @@
 #include "11n.h"
 
 #define VERSION        "1.0"
+#define MFG_FIRMWARE   "mwifiex_mfg.bin"
 
 static unsigned int debug_mask = MWIFIEX_DEFAULT_DEBUG_MASK;
 module_param(debug_mask, uint, 0);
@@ -37,6 +38,10 @@ module_param(driver_mode, ushort, 0);
 MODULE_PARM_DESC(driver_mode,
                 "station=0x1(default), ap-sta=0x3, station-p2p=0x5, ap-sta-p2p=0x7");
 
+bool mfg_mode;
+module_param(mfg_mode, bool, 0);
+MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0");
+
 /*
  * This function registers the device and performs all the necessary
  * initializations.
@@ -139,6 +144,8 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
                adapter->nd_info = NULL;
        }
 
+       kfree(adapter->regd);
+
        vfree(adapter->chan_stats);
        kfree(adapter);
        return 0;
@@ -486,9 +493,11 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
  */
 static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
 {
-       flush_workqueue(adapter->workqueue);
-       destroy_workqueue(adapter->workqueue);
-       adapter->workqueue = NULL;
+       if (adapter->workqueue) {
+               flush_workqueue(adapter->workqueue);
+               destroy_workqueue(adapter->workqueue);
+               adapter->workqueue = NULL;
+       }
 
        if (adapter->rx_workqueue) {
                flush_workqueue(adapter->rx_workqueue);
@@ -559,16 +568,21 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
                goto done;
        }
        /* Wait for mwifiex_init to complete */
-       wait_event_interruptible(adapter->init_wait_q,
-                                adapter->init_wait_q_woken);
-       if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
-               goto err_init_fw;
+       if (!adapter->mfg_mode) {
+               wait_event_interruptible(adapter->init_wait_q,
+                                        adapter->init_wait_q_woken);
+               if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
+                       goto err_init_fw;
+       }
 
        priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
-       if (mwifiex_register_cfg80211(adapter)) {
-               mwifiex_dbg(adapter, ERROR,
-                           "cannot register with cfg80211\n");
-               goto err_init_fw;
+
+       if (!adapter->wiphy) {
+               if (mwifiex_register_cfg80211(adapter)) {
+                       mwifiex_dbg(adapter, ERROR,
+                                   "cannot register with cfg80211\n");
+                       goto err_init_fw;
+               }
        }
 
        if (mwifiex_init_channel_scan_gap(adapter)) {
@@ -662,16 +676,41 @@ done:
 /*
  * This function initializes the hardware and gets firmware.
  */
-static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
+static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter,
+                             bool req_fw_nowait)
 {
        int ret;
 
-       ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
-                                     adapter->dev, GFP_KERNEL, adapter,
-                                     mwifiex_fw_dpc);
-       if (ret < 0)
-               mwifiex_dbg(adapter, ERROR,
-                           "request_firmware_nowait error %d\n", ret);
+       /* Override default firmware with manufacturing one if
+        * manufacturing mode is enabled
+        */
+       if (mfg_mode) {
+               if (strlcpy(adapter->fw_name, MFG_FIRMWARE,
+                           sizeof(adapter->fw_name)) >=
+                           sizeof(adapter->fw_name)) {
+                       pr_err("%s: fw_name too long!\n", __func__);
+                       return -1;
+               }
+       }
+
+       if (req_fw_nowait) {
+               ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
+                                             adapter->dev, GFP_KERNEL, adapter,
+                                             mwifiex_fw_dpc);
+               if (ret < 0)
+                       mwifiex_dbg(adapter, ERROR,
+                                   "request_firmware_nowait error %d\n", ret);
+       } else {
+               ret = request_firmware(&adapter->firmware,
+                                      adapter->fw_name,
+                                      adapter->dev);
+               if (ret < 0)
+                       mwifiex_dbg(adapter, ERROR,
+                                   "request_firmware error %d\n", ret);
+               else
+                       mwifiex_fw_dpc(adapter->firmware, (void *)adapter);
+       }
+
        return ret;
 }
 
@@ -1320,6 +1359,199 @@ static void mwifiex_main_work_queue(struct work_struct *work)
        mwifiex_main_process(adapter);
 }
 
+/*
+ * This function gets called during PCIe function level reset. Required
+ * code is extracted from mwifiex_remove_card()
+ */
+static int
+mwifiex_shutdown_sw(struct mwifiex_adapter *adapter, struct semaphore *sem)
+{
+       struct mwifiex_private *priv;
+       int i;
+
+       if (down_interruptible(sem))
+               goto exit_sem_err;
+
+       if (!adapter)
+               goto exit_remove;
+
+       priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+       mwifiex_deauthenticate(priv, NULL);
+
+       /* We can no longer handle interrupts once we start doing the teardown
+        * below.
+        */
+       if (adapter->if_ops.disable_int)
+               adapter->if_ops.disable_int(adapter);
+
+       adapter->surprise_removed = true;
+       mwifiex_terminate_workqueue(adapter);
+
+       /* Stop data */
+       for (i = 0; i < adapter->priv_num; i++) {
+               priv = adapter->priv[i];
+               if (priv && priv->netdev) {
+                       mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+                       if (netif_carrier_ok(priv->netdev))
+                               netif_carrier_off(priv->netdev);
+                       netif_device_detach(priv->netdev);
+               }
+       }
+
+       mwifiex_dbg(adapter, CMD, "cmd: calling mwifiex_shutdown_drv...\n");
+       adapter->init_wait_q_woken = false;
+
+       if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
+               wait_event_interruptible(adapter->init_wait_q,
+                                        adapter->init_wait_q_woken);
+       if (adapter->if_ops.down_dev)
+               adapter->if_ops.down_dev(adapter);
+
+       mwifiex_dbg(adapter, CMD, "cmd: mwifiex_shutdown_drv done\n");
+       if (atomic_read(&adapter->rx_pending) ||
+           atomic_read(&adapter->tx_pending) ||
+           atomic_read(&adapter->cmd_pending)) {
+               mwifiex_dbg(adapter, ERROR,
+                           "rx_pending=%d, tx_pending=%d,\t"
+                           "cmd_pending=%d\n",
+                           atomic_read(&adapter->rx_pending),
+                           atomic_read(&adapter->tx_pending),
+                           atomic_read(&adapter->cmd_pending));
+       }
+
+       for (i = 0; i < adapter->priv_num; i++) {
+               priv = adapter->priv[i];
+               if (!priv)
+                       continue;
+               rtnl_lock();
+               if (priv->netdev &&
+                   priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED)
+                       mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
+               rtnl_unlock();
+       }
+
+exit_remove:
+       up(sem);
+exit_sem_err:
+       mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+       return 0;
+}
+
+/* This function gets called during PCIe function level reset. Required
+ * code is extracted from mwifiex_add_card()
+ */
+static int
+mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct semaphore *sem,
+                 struct mwifiex_if_ops *if_ops, u8 iface_type)
+{
+       char fw_name[32];
+       struct pcie_service_card *card = adapter->card;
+
+       if (down_interruptible(sem))
+               goto exit_sem_err;
+
+       mwifiex_init_lock_list(adapter);
+       if (adapter->if_ops.up_dev)
+               adapter->if_ops.up_dev(adapter);
+
+       adapter->iface_type = iface_type;
+       adapter->card_sem = sem;
+
+       adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
+       adapter->surprise_removed = false;
+       init_waitqueue_head(&adapter->init_wait_q);
+       adapter->is_suspended = false;
+       adapter->hs_activated = false;
+       init_waitqueue_head(&adapter->hs_activate_wait_q);
+       init_waitqueue_head(&adapter->cmd_wait_q.wait);
+       adapter->cmd_wait_q.status = 0;
+       adapter->scan_wait_q_woken = false;
+
+       if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB)
+               adapter->rx_work_enabled = true;
+
+       adapter->workqueue =
+               alloc_workqueue("MWIFIEX_WORK_QUEUE",
+                               WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+       if (!adapter->workqueue)
+               goto err_kmalloc;
+
+       INIT_WORK(&adapter->main_work, mwifiex_main_work_queue);
+
+       if (adapter->rx_work_enabled) {
+               adapter->rx_workqueue = alloc_workqueue("MWIFIEX_RX_WORK_QUEUE",
+                                                       WQ_HIGHPRI |
+                                                       WQ_MEM_RECLAIM |
+                                                       WQ_UNBOUND, 1);
+               if (!adapter->rx_workqueue)
+                       goto err_kmalloc;
+               INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue);
+       }
+
+       /* Register the device. Fill up the private data structure with
+        * relevant information from the card. Some code extracted from
+        * mwifiex_register_dev()
+        */
+       mwifiex_dbg(adapter, INFO, "%s, mwifiex_init_hw_fw()...\n", __func__);
+       strcpy(fw_name, adapter->fw_name);
+       strcpy(adapter->fw_name, PCIE8997_DEFAULT_WIFIFW_NAME);
+
+       adapter->tx_buf_size = card->pcie.tx_buf_size;
+       adapter->ext_scan = card->pcie.can_ext_scan;
+       if (mwifiex_init_hw_fw(adapter, false)) {
+               strcpy(adapter->fw_name, fw_name);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: firmware init failed\n", __func__);
+               goto err_init_fw;
+       }
+       strcpy(adapter->fw_name, fw_name);
+       mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+       up(sem);
+       return 0;
+
+err_init_fw:
+       mwifiex_dbg(adapter, ERROR, "info: %s: unregister device\n", __func__);
+       if (adapter->if_ops.unregister_dev)
+               adapter->if_ops.unregister_dev(adapter);
+       if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
+               mwifiex_dbg(adapter, ERROR,
+                           "info: %s: shutdown mwifiex\n", __func__);
+               adapter->init_wait_q_woken = false;
+
+               if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
+                       wait_event_interruptible(adapter->init_wait_q,
+                                                adapter->init_wait_q_woken);
+       }
+
+err_kmalloc:
+       mwifiex_terminate_workqueue(adapter);
+       adapter->surprise_removed = true;
+       up(sem);
+exit_sem_err:
+       mwifiex_dbg(adapter, INFO, "%s, error\n", __func__);
+
+       return -1;
+}
+
+/* This function handles the pre- and post-PCIe function level reset stages.
+ * It performs software cleanup without touching PCIe-specific code.
+ * Likewise, PCIe-specific setup is skipped during re-initialization.
+ */
+void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare)
+{
+       struct mwifiex_if_ops if_ops;
+
+       if (!prepare) {
+               mwifiex_reinit_sw(adapter, adapter->card_sem, &if_ops,
+                                 adapter->iface_type);
+       } else {
+               memcpy(&if_ops, &adapter->if_ops,
+                      sizeof(struct mwifiex_if_ops));
+               mwifiex_shutdown_sw(adapter, adapter->card_sem);
+       }
+}
+EXPORT_SYMBOL_GPL(mwifiex_do_flr);
+
 /*
  * This function adds the card.
  *
@@ -1391,7 +1623,7 @@ mwifiex_add_card(void *card, struct semaphore *sem,
                goto err_registerdev;
        }
 
-       if (mwifiex_init_hw_fw(adapter)) {
+       if (mwifiex_init_hw_fw(adapter, true)) {
                pr_err("%s: firmware init failed\n", __func__);
                goto err_init_fw;
        }
index 9f6bb40..26df28f 100644 (file)
@@ -58,6 +58,7 @@
 #include "sdio.h"
 
 extern const char driver_version[];
+extern bool mfg_mode;
 
 struct mwifiex_adapter;
 struct mwifiex_private;
@@ -675,6 +676,7 @@ struct mwifiex_private {
        struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX];
        u8 assoc_resp_ht_param;
        bool ht_param_present;
+       u8 random_mac[ETH_ALEN];
 };
 
 
@@ -827,6 +829,8 @@ struct mwifiex_if_ops {
        void (*deaggr_pkt)(struct mwifiex_adapter *, struct sk_buff *);
        void (*multi_port_resync)(struct mwifiex_adapter *);
        bool (*is_port_ready)(struct mwifiex_private *);
+       void (*down_dev)(struct mwifiex_adapter *);
+       void (*up_dev)(struct mwifiex_adapter *);
 };
 
 struct mwifiex_adapter {
@@ -989,6 +993,7 @@ struct mwifiex_adapter {
        u32 drv_info_size;
        bool scan_chan_gap_enabled;
        struct sk_buff_head rx_data_q;
+       bool mfg_mode;
        struct mwifiex_chan_stats *chan_stats;
        u32 num_in_chan_stats;
        int survey_idx;
@@ -1004,6 +1009,7 @@ struct mwifiex_adapter {
        bool usb_mc_status;
        bool usb_mc_setup;
        struct cfg80211_wowlan_nd_info *nd_info;
+       struct ieee80211_regdomain *regd;
 };
 
 void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1625,4 +1631,5 @@ void mwifiex_debugfs_remove(void);
 void mwifiex_dev_debugfs_init(struct mwifiex_private *priv);
 void mwifiex_dev_debugfs_remove(struct mwifiex_private *priv);
 #endif
+void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare);
 #endif /* !_MWIFIEX_MAIN_H_ */
index 453ab6a..3c3c4f1 100644 (file)
@@ -225,7 +225,7 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
        if (!adapter || !adapter->priv_num)
                return;
 
-       if (user_rmmod) {
+       if (user_rmmod && !adapter->mfg_mode) {
 #ifdef CONFIG_PM_SLEEP
                if (adapter->is_suspended)
                        mwifiex_pcie_resume(&pdev->dev);
@@ -277,6 +277,52 @@ static const struct pci_device_id mwifiex_ids[] = {
 
 MODULE_DEVICE_TABLE(pci, mwifiex_ids);
 
+static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
+{
+       struct mwifiex_adapter *adapter;
+       struct pcie_service_card *card;
+
+       if (!pdev) {
+               pr_err("%s: PCIe device is not specified\n", __func__);
+               return;
+       }
+
+       card = (struct pcie_service_card *)pci_get_drvdata(pdev);
+       if (!card || !card->adapter) {
+               pr_err("%s: Card or adapter structure is not valid (%ld)\n",
+                      __func__, (long)card);
+               return;
+       }
+
+       adapter = card->adapter;
+       mwifiex_dbg(adapter, INFO,
+                   "%s: vendor=0x%4.04x device=0x%4.04x rev=%d %s\n",
+                   __func__, pdev->vendor, pdev->device,
+                   pdev->revision,
+                   prepare ? "Pre-FLR" : "Post-FLR");
+
+       if (prepare) {
+               /* The kernel will perform FLR after this notification.
+                * Clean up all software state without touching anything
+                * related to PCIe or the hardware.
+                */
+               mwifiex_do_flr(adapter, prepare);
+               adapter->surprise_removed = true;
+       } else {
+               /* The kernel saves the PCIe function context before FLR and
+                * restores it afterwards. Reconfigure the software and
+                * firmware, including re-downloading the firmware.
+                */
+               adapter->surprise_removed = false;
+               mwifiex_do_flr(adapter, prepare);
+       }
+       mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+}
+
+static const struct pci_error_handlers mwifiex_pcie_err_handler[] = {
+               { .reset_notify = mwifiex_pcie_reset_notify, },
+};
+
 #ifdef CONFIG_PM_SLEEP
 /* Power Management Hooks */
 static SIMPLE_DEV_PM_OPS(mwifiex_pcie_pm_ops, mwifiex_pcie_suspend,
@@ -295,6 +341,7 @@ static struct pci_driver __refdata mwifiex_pcie = {
        },
 #endif
        .shutdown = mwifiex_pcie_shutdown,
+       .err_handler = mwifiex_pcie_err_handler,
 };
 
 /*
@@ -1956,8 +2003,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                        if (firmware_len - offset < txlen)
                                txlen = firmware_len - offset;
 
-                       mwifiex_dbg(adapter, INFO, ".");
-
                        tx_blocks = (txlen + card->pcie.blksz_fw_dl - 1) /
                                    card->pcie.blksz_fw_dl;
 
@@ -2043,6 +2088,10 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
                        ret = -1;
                else
                        ret = 0;
+
+               mwifiex_dbg(adapter, INFO, "Try %d if FW is ready <%d,%#x>",
+                           tries, ret, firmware_stat);
+
                if (ret)
                        continue;
                if (firmware_stat == FIRMWARE_READY_PCIE) {
@@ -2074,8 +2123,7 @@ mwifiex_check_winner_status(struct mwifiex_adapter *adapter)
                adapter->winner = 1;
        } else {
                mwifiex_dbg(adapter, ERROR,
-                           "PCI-E is not the winner <%#x,%d>, exit dnld\n",
-                           ret, adapter->winner);
+                           "PCI-E is not the winner <%#x>", winner);
        }
 
        return ret;
@@ -2863,7 +2911,7 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
 static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter)
 {
        int revision_id = 0;
-       int version;
+       int version, magic;
        struct pcie_service_card *card = adapter->card;
 
        switch (card->dev->device) {
@@ -2888,30 +2936,19 @@ static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter)
                }
                break;
        case PCIE_DEVICE_ID_MARVELL_88W8997:
-               mwifiex_read_reg(adapter, 0x0c48, &revision_id);
+               mwifiex_read_reg(adapter, 0x8, &revision_id);
                mwifiex_read_reg(adapter, 0x0cd0, &version);
+               mwifiex_read_reg(adapter, 0x0cd4, &magic);
+               revision_id &= 0xff;
                version &= 0x7;
-               switch (revision_id) {
-               case PCIE8997_V2:
-                       if (version == CHIP_VER_PCIEUART)
-                               strcpy(adapter->fw_name,
-                                      PCIEUART8997_FW_NAME_V2);
-                       else
-                               strcpy(adapter->fw_name,
-                                      PCIEUSB8997_FW_NAME_V2);
-                       break;
-               case PCIE8997_Z:
-                       if (version == CHIP_VER_PCIEUART)
-                               strcpy(adapter->fw_name,
-                                      PCIEUART8997_FW_NAME_Z);
-                       else
-                               strcpy(adapter->fw_name,
-                                      PCIEUSB8997_FW_NAME_Z);
-                       break;
-               default:
-                       strcpy(adapter->fw_name, PCIE8997_DEFAULT_FW_NAME);
-                       break;
-               }
+               magic &= 0xff;
+               if (revision_id == PCIE8997_A1 &&
+                   magic == CHIP_MAGIC_VALUE &&
+                   version == CHIP_VER_PCIEUART)
+                       strcpy(adapter->fw_name, PCIEUART8997_FW_NAME_V4);
+               else
+                       strcpy(adapter->fw_name, PCIEUSB8997_FW_NAME_V4);
+               break;
        default:
                break;
        }
@@ -2952,7 +2989,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
 static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
-       const struct mwifiex_pcie_card_reg *reg;
        struct pci_dev *pdev;
        int i;
 
@@ -2976,8 +3012,90 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
                        if (card->msi_enable)
                                pci_disable_msi(pdev);
               }
+       }
+}
+
+/* This function initializes the PCI-E host memory space, WCB rings, etc.
+ *
+ * The following initialization steps are performed:
+ *      - Allocate TXBD ring buffers
+ *      - Allocate RXBD ring buffers
+ *      - Allocate event BD ring buffers
+ *      - Allocate command response ring buffer
+ *      - Allocate sleep cookie buffer
+ * This mirrors part of mwifiex_pcie_init(); it does not reset the PCIe registers.
+ */
+static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter)
+{
+       struct pcie_service_card *card = adapter->card;
+       int ret;
+       struct pci_dev *pdev = card->dev;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
-               reg = card->pcie.reg;
+       card->cmdrsp_buf = NULL;
+       ret = mwifiex_pcie_create_txbd_ring(adapter);
+       if (ret) {
+               mwifiex_dbg(adapter, ERROR, "Failed to create txbd ring\n");
+               goto err_cre_txbd;
+       }
+
+       ret = mwifiex_pcie_create_rxbd_ring(adapter);
+       if (ret) {
+               mwifiex_dbg(adapter, ERROR, "Failed to create rxbd ring\n");
+               goto err_cre_rxbd;
+       }
+
+       ret = mwifiex_pcie_create_evtbd_ring(adapter);
+       if (ret) {
+               mwifiex_dbg(adapter, ERROR, "Failed to create evtbd ring\n");
+               goto err_cre_evtbd;
+       }
+
+       ret = mwifiex_pcie_alloc_cmdrsp_buf(adapter);
+       if (ret) {
+               mwifiex_dbg(adapter, ERROR, "Failed to allocate cmdbuf buffer\n");
+               goto err_alloc_cmdbuf;
+       }
+
+       if (reg->sleep_cookie) {
+               ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter);
+               if (ret) {
+                       mwifiex_dbg(adapter, ERROR, "Failed to allocate sleep_cookie buffer\n");
+                       goto err_alloc_cookie;
+               }
+       } else {
+               card->sleep_cookie_vbase = NULL;
+       }
+       return;
+
+err_alloc_cookie:
+       mwifiex_pcie_delete_cmdrsp_buf(adapter);
+err_alloc_cmdbuf:
+       mwifiex_pcie_delete_evtbd_ring(adapter);
+err_cre_evtbd:
+       mwifiex_pcie_delete_rxbd_ring(adapter);
+err_cre_rxbd:
+       mwifiex_pcie_delete_txbd_ring(adapter);
+err_cre_txbd:
+       pci_iounmap(pdev, card->pci_mmap1);
+}
+
+/* This function cleans up the PCI-E host memory space.
+ * Most of this teardown code was factored out of
+ * mwifiex_unregister_dev().
+ */
+static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
+{
+       struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+       if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000))
+               mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n");
+
+       adapter->seq_num = 0;
+       adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
+
+       if (card) {
                if (reg->sleep_cookie)
                        mwifiex_pcie_delete_sleep_cookie_buf(adapter);
 
@@ -2987,6 +3105,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
                mwifiex_pcie_delete_txbd_ring(adapter);
                card->cmdrsp_buf = NULL;
        }
+
+       return;
 }
 
 static struct mwifiex_if_ops pcie_ops = {
@@ -3013,6 +3133,8 @@ static struct mwifiex_if_ops pcie_ops = {
        .clean_pcie_ring =              mwifiex_clean_pcie_ring_buf,
        .reg_dump =                     mwifiex_pcie_reg_dump,
        .device_dump =                  mwifiex_pcie_device_dump,
+       .down_dev =                     mwifiex_pcie_down_dev,
+       .up_dev =                       mwifiex_pcie_up_dev,
 };
 
 /*
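
mwifiex_pcie_up_dev() above follows the usual kernel staged-setup pattern: each allocation that fails jumps to a label that unwinds only the steps already completed, in reverse order. A minimal, self-contained sketch of that pattern (the demo_* names and sizes below are hypothetical, not mwifiex code):

#include <linux/errno.h>
#include <linux/slab.h>

struct demo_rings {
	void *txbd;
	void *rxbd;
};

/* Allocate both rings; on failure free whatever was already set up. */
static int demo_rings_up(struct demo_rings *r)
{
	r->txbd = kzalloc(4096, GFP_KERNEL);
	if (!r->txbd)
		return -ENOMEM;

	r->rxbd = kzalloc(4096, GFP_KERNEL);
	if (!r->rxbd)
		goto err_free_txbd;

	return 0;

err_free_txbd:
	kfree(r->txbd);
	r->txbd = NULL;
	return -ENOMEM;
}

The real function unwinds through the same chain of delete helpers shown in the hunk above, ending with pci_iounmap() once nothing else is left to tear down.
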
index f05061c..46f99ca 100644 (file)
 #define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin"
 #define PCIE8897_A0_FW_NAME "mrvl/pcie8897_uapsta_a0.bin"
 #define PCIE8897_B0_FW_NAME "mrvl/pcie8897_uapsta.bin"
-#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcieusb8997_combo_v2.bin"
-#define PCIEUART8997_FW_NAME_Z "mrvl/pcieuart8997_combo.bin"
-#define PCIEUART8997_FW_NAME_V2 "mrvl/pcieuart8997_combo_v2.bin"
-#define PCIEUSB8997_FW_NAME_Z "mrvl/pcieusb8997_combo.bin"
-#define PCIEUSB8997_FW_NAME_V2 "mrvl/pcieusb8997_combo_v2.bin"
+#define PCIEUART8997_FW_NAME_V4 "mrvl/pcieuart8997_combo_v4.bin"
+#define PCIEUSB8997_FW_NAME_V4 "mrvl/pcieusb8997_combo_v4.bin"
+#define PCIE8997_DEFAULT_WIFIFW_NAME "mrvl/pcie8997_wlan_v4.bin"
 
 #define PCIE_VENDOR_ID_MARVELL              (0x11ab)
 #define PCIE_VENDOR_ID_V2_MARVELL           (0x1b4b)
 
 #define PCIE8897_A0    0x1100
 #define PCIE8897_B0    0x1200
-#define PCIE8997_Z     0x0
-#define PCIE8997_V2    0x471
+#define PCIE8997_A0    0x10
+#define PCIE8997_A1    0x11
 #define CHIP_VER_PCIEUART      0x3
+#define CHIP_MAGIC_VALUE       0x24
 
 /* Constants for Buffer Descriptor (BD) rings */
 #define MWIFIEX_MAX_TXRX_BD                    0x20
index 21ec847..97c9765 100644 (file)
@@ -820,6 +820,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
        struct mwifiex_adapter *adapter = priv->adapter;
        struct mwifiex_ie_types_num_probes *num_probes_tlv;
        struct mwifiex_ie_types_scan_chan_gap *chan_gap_tlv;
+       struct mwifiex_ie_types_random_mac *random_mac_tlv;
        struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
        struct mwifiex_ie_types_bssid_list *bssid_tlv;
        u8 *tlv_pos;
@@ -835,6 +836,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
        u8 ssid_filter;
        struct mwifiex_ie_types_htcap *ht_cap;
        struct mwifiex_ie_types_bss_mode *bss_mode;
+       const u8 zero_mac[6] = {0, 0, 0, 0, 0, 0};
 
        /* The tlv_buf_len is calculated for each scan command.  The TLVs added
           in this routine will be preserved since the routine that sends the
@@ -967,6 +969,18 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                        tlv_pos +=
                                  sizeof(struct mwifiex_ie_types_scan_chan_gap);
                }
+
+               if (!ether_addr_equal(user_scan_in->random_mac, zero_mac)) {
+                       random_mac_tlv = (void *)tlv_pos;
+                       random_mac_tlv->header.type =
+                                        cpu_to_le16(TLV_TYPE_RANDOM_MAC);
+                       random_mac_tlv->header.len =
+                                   cpu_to_le16(sizeof(random_mac_tlv->mac));
+                       ether_addr_copy(random_mac_tlv->mac,
+                                       user_scan_in->random_mac);
+                       tlv_pos +=
+                                 sizeof(struct mwifiex_ie_types_random_mac);
+               }
        } else {
                scan_cfg_out->bss_mode = (u8) adapter->scan_mode;
                num_probes = adapter->scan_probes;
@@ -1922,6 +1936,7 @@ mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
        }
 
        adapter->active_scan_triggered = true;
+       ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
        user_scan_cfg->num_ssids = priv->scan_request->n_ssids;
        user_scan_cfg->ssid_list = priv->scan_request->ssids;
 
@@ -2179,18 +2194,14 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 
                if (chan_band_tlv && adapter->nd_info) {
                        adapter->nd_info->matches[idx] =
-                               kzalloc(sizeof(*pmatch) +
-                               sizeof(u32), GFP_ATOMIC);
+                               kzalloc(sizeof(*pmatch) + sizeof(u32),
+                                       GFP_ATOMIC);
 
                        pmatch = adapter->nd_info->matches[idx];
 
                        if (pmatch) {
-                               memset(pmatch, 0, sizeof(*pmatch));
-                               if (chan_band_tlv) {
-                                       pmatch->n_channels = 1;
-                                       pmatch->channels[0] =
-                                               chan_band->chan_number;
-                               }
+                               pmatch->n_channels = 1;
+                               pmatch->channels[0] = chan_band->chan_number;
                        }
                }
 
@@ -2761,6 +2772,7 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
        if (!scan_cfg)
                return -ENOMEM;
 
+       ether_addr_copy(scan_cfg->random_mac, priv->random_mac);
        scan_cfg->ssid_list = req_ssid;
        scan_cfg->num_ssids = 1;
 
index d3e1561..8718950 100644 (file)
@@ -122,9 +122,11 @@ static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card)
                                               IRQF_TRIGGER_LOW,
                                               "wifi_wake", cfg);
                        if (ret) {
-                               dev_err(dev,
+                               dev_dbg(dev,
                                        "Failed to request irq_wifi %d (%d)\n",
                                        cfg->irq_wifi, ret);
+                               card->plt_wake_cfg = NULL;
+                               return 0;
                        }
                        disable_irq(cfg->irq_wifi);
                }
@@ -289,7 +291,7 @@ mwifiex_sdio_remove(struct sdio_func *func)
 
        mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num);
 
-       if (user_rmmod) {
+       if (user_rmmod && !adapter->mfg_mode) {
                if (adapter->is_suspended)
                        mwifiex_sdio_resume(adapter->dev);
 
index 7897037..49048b4 100644 (file)
@@ -706,15 +706,10 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
                                    (priv->wep_key_curr_index & KEY_INDEX_MASK))
                                        key_info |= KEY_DEFAULT;
                        } else {
-                               if (mac) {
-                                       if (is_broadcast_ether_addr(mac))
-                                               key_info |= KEY_MCAST;
-                                       else
-                                               key_info |= KEY_UNICAST |
-                                                           KEY_DEFAULT;
-                               } else {
+                               if (is_broadcast_ether_addr(mac))
                                        key_info |= KEY_MCAST;
-                               }
+                               else
+                                       key_info |= KEY_UNICAST | KEY_DEFAULT;
                        }
                }
                km->key_param_set.key_info = cpu_to_le16(key_info);
@@ -1244,20 +1239,23 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv,
                return 0;
 
        /* Send the ring base addresses and count to firmware */
-       host_spec->txbd_addr_lo = (u32)(card->txbd_ring_pbase);
-       host_spec->txbd_addr_hi = (u32)(((u64)card->txbd_ring_pbase)>>32);
-       host_spec->txbd_count = MWIFIEX_MAX_TXRX_BD;
-       host_spec->rxbd_addr_lo = (u32)(card->rxbd_ring_pbase);
-       host_spec->rxbd_addr_hi = (u32)(((u64)card->rxbd_ring_pbase)>>32);
-       host_spec->rxbd_count = MWIFIEX_MAX_TXRX_BD;
-       host_spec->evtbd_addr_lo = (u32)(card->evtbd_ring_pbase);
-       host_spec->evtbd_addr_hi = (u32)(((u64)card->evtbd_ring_pbase)>>32);
-       host_spec->evtbd_count = MWIFIEX_MAX_EVT_BD;
+       host_spec->txbd_addr_lo = cpu_to_le32((u32)(card->txbd_ring_pbase));
+       host_spec->txbd_addr_hi =
+                       cpu_to_le32((u32)(((u64)card->txbd_ring_pbase) >> 32));
+       host_spec->txbd_count = cpu_to_le32(MWIFIEX_MAX_TXRX_BD);
+       host_spec->rxbd_addr_lo = cpu_to_le32((u32)(card->rxbd_ring_pbase));
+       host_spec->rxbd_addr_hi =
+                       cpu_to_le32((u32)(((u64)card->rxbd_ring_pbase) >> 32));
+       host_spec->rxbd_count = cpu_to_le32(MWIFIEX_MAX_TXRX_BD);
+       host_spec->evtbd_addr_lo = cpu_to_le32((u32)(card->evtbd_ring_pbase));
+       host_spec->evtbd_addr_hi =
+                       cpu_to_le32((u32)(((u64)card->evtbd_ring_pbase) >> 32));
+       host_spec->evtbd_count = cpu_to_le32(MWIFIEX_MAX_EVT_BD);
        if (card->sleep_cookie_vbase) {
                host_spec->sleep_cookie_addr_lo =
-                                               (u32)(card->sleep_cookie_pbase);
-               host_spec->sleep_cookie_addr_hi =
-                                (u32)(((u64)(card->sleep_cookie_pbase)) >> 32);
+                               cpu_to_le32((u32)(card->sleep_cookie_pbase));
+               host_spec->sleep_cookie_addr_hi = cpu_to_le32((u32)(((u64)
+                                       (card->sleep_cookie_pbase)) >> 32));
                mwifiex_dbg(priv->adapter, INFO,
                            "sleep_cook_lo phy addr: 0x%x\n",
                            host_spec->sleep_cookie_addr_lo);
@@ -1482,7 +1480,7 @@ int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
                        continue;
 
                /* property header is 6 bytes, data must fit in cmd buffer */
-               if (prop && prop->value && prop->length > 6 &&
+               if (prop->value && prop->length > 6 &&
                    prop->length <= MWIFIEX_SIZE_OF_CMD_BUFFER - S_DS_GEN) {
                        ret = mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA,
                                               HostCmd_ACT_GEN_SET, 0,
@@ -1596,6 +1594,21 @@ static int mwifiex_cmd_gtk_rekey_offload(struct mwifiex_private *priv,
        return 0;
 }
 
+static int mwifiex_cmd_chan_region_cfg(struct mwifiex_private *priv,
+                                      struct host_cmd_ds_command *cmd,
+                                      u16 cmd_action)
+{
+       struct host_cmd_ds_chan_region_cfg *reg = &cmd->params.reg_cfg;
+
+       cmd->command = cpu_to_le16(HostCmd_CMD_CHAN_REGION_CFG);
+       cmd->size = cpu_to_le16(sizeof(*reg) + S_DS_GEN);
+
+       if (cmd_action == HostCmd_ACT_GEN_GET)
+               reg->action = cpu_to_le16(cmd_action);
+
+       return 0;
+}
+
 static int
 mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
                         struct host_cmd_ds_command *cmd,
@@ -2136,6 +2149,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                ret = mwifiex_cmd_gtk_rekey_offload(priv, cmd_ptr, cmd_action,
                                                    data_buf);
                break;
+       case HostCmd_CMD_CHAN_REGION_CFG:
+               ret = mwifiex_cmd_chan_region_cfg(priv, cmd_ptr, cmd_action);
+               break;
        default:
                mwifiex_dbg(priv->adapter, ERROR,
                            "PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -2273,6 +2289,9 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
                        if (ret)
                                return -1;
                }
+
+               mwifiex_send_cmd(priv, HostCmd_CMD_CHAN_REGION_CFG,
+                                HostCmd_ACT_GEN_GET, 0, NULL, true);
        }
 
        /* get tx rate */
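
The host_spec hunk above makes the descriptor-ring programming explicitly little-endian: each 64-bit DMA base address is split into lower and upper 32-bit halves, and each half is converted with cpu_to_le32() before being handed to the firmware. A minimal sketch of that split, using a hypothetical descriptor layout rather than the mwifiex structure:

#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical descriptor with explicitly little-endian address halves. */
struct demo_ring_desc {
	__le32 addr_lo;
	__le32 addr_hi;
	__le32 count;
};

static void demo_fill_desc(struct demo_ring_desc *d, dma_addr_t pbase,
			   u32 count)
{
	d->addr_lo = cpu_to_le32(lower_32_bits(pbase));
	d->addr_hi = cpu_to_le32(upper_32_bits(pbase));
	d->count = cpu_to_le32(count);
}

The hunk above writes out the same lower/upper split with explicit casts and shifts instead of the lower_32_bits()/upper_32_bits() helpers.
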
index ccf5493..3344a26 100644 (file)
@@ -962,7 +962,7 @@ static int mwifiex_ret_uap_sta_list(struct mwifiex_private *priv,
        int i;
        struct mwifiex_sta_node *sta_node;
 
-       for (i = 0; i < sta_list->sta_count; i++) {
+       for (i = 0; i < (le16_to_cpu(sta_list->sta_count)); i++) {
                sta_node = mwifiex_get_sta_entry(priv, sta_info->mac);
                if (unlikely(!sta_node))
                        continue;
@@ -1022,6 +1022,135 @@ static int mwifiex_ret_robust_coex(struct mwifiex_private *priv,
        return 0;
 }
 
+static struct ieee80211_regdomain *
+mwifiex_create_custom_regdomain(struct mwifiex_private *priv,
+                               u8 *buf, u16 buf_len)
+{
+       u16 num_chan = buf_len / 2;
+       struct ieee80211_regdomain *regd;
+       struct ieee80211_reg_rule *rule;
+       bool new_rule;
+       int regd_size, idx, freq, prev_freq = 0;
+       u32 bw, prev_bw = 0;
+       u8 chflags, prev_chflags = 0, valid_rules = 0;
+
+       if (WARN_ON_ONCE(num_chan > NL80211_MAX_SUPP_REG_RULES))
+               return ERR_PTR(-EINVAL);
+
+       regd_size = sizeof(struct ieee80211_regdomain) +
+                   num_chan * sizeof(struct ieee80211_reg_rule);
+
+       regd = kzalloc(regd_size, GFP_KERNEL);
+       if (!regd)
+               return ERR_PTR(-ENOMEM);
+
+       for (idx = 0; idx < num_chan; idx++) {
+               u8 chan;
+               enum nl80211_band band;
+
+               chan = *buf++;
+               if (!chan)
+                       return NULL;
+               chflags = *buf++;
+               band = (chan <= 14) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+               freq = ieee80211_channel_to_frequency(chan, band);
+               new_rule = false;
+
+               if (chflags & MWIFIEX_CHANNEL_DISABLED)
+                       continue;
+
+               if (band == NL80211_BAND_5GHZ) {
+                       if (!(chflags & MWIFIEX_CHANNEL_NOHT80))
+                               bw = MHZ_TO_KHZ(80);
+                       else if (!(chflags & MWIFIEX_CHANNEL_NOHT40))
+                               bw = MHZ_TO_KHZ(40);
+                       else
+                               bw = MHZ_TO_KHZ(20);
+               } else {
+                       if (!(chflags & MWIFIEX_CHANNEL_NOHT40))
+                               bw = MHZ_TO_KHZ(40);
+                       else
+                               bw = MHZ_TO_KHZ(20);
+               }
+
+               if (idx == 0 || prev_chflags != chflags || prev_bw != bw ||
+                   freq - prev_freq > 20) {
+                       valid_rules++;
+                       new_rule = true;
+               }
+
+               rule = &regd->reg_rules[valid_rules - 1];
+
+               rule->freq_range.end_freq_khz = MHZ_TO_KHZ(freq + 10);
+
+               prev_chflags = chflags;
+               prev_freq = freq;
+               prev_bw = bw;
+
+               if (!new_rule)
+                       continue;
+
+               rule->freq_range.start_freq_khz = MHZ_TO_KHZ(freq - 10);
+               rule->power_rule.max_eirp = DBM_TO_MBM(19);
+
+               if (chflags & MWIFIEX_CHANNEL_PASSIVE)
+                       rule->flags = NL80211_RRF_NO_IR;
+
+               if (chflags & MWIFIEX_CHANNEL_DFS)
+                       rule->flags = NL80211_RRF_DFS;
+
+               rule->freq_range.max_bandwidth_khz = bw;
+       }
+
+       regd->n_reg_rules = valid_rules;
+       regd->alpha2[0] = '9';
+       regd->alpha2[1] = '9';
+
+       return regd;
+}
+
+static int mwifiex_ret_chan_region_cfg(struct mwifiex_private *priv,
+                                      struct host_cmd_ds_command *resp)
+{
+       struct host_cmd_ds_chan_region_cfg *reg = &resp->params.reg_cfg;
+       u16 action = le16_to_cpu(reg->action);
+       u16 tlv, tlv_buf_len, tlv_buf_left;
+       struct mwifiex_ie_types_header *head;
+       u8 *tlv_buf;
+
+       if (action != HostCmd_ACT_GEN_GET)
+               return 0;
+
+       tlv_buf = (u8 *)reg + sizeof(*reg);
+       tlv_buf_left = le16_to_cpu(resp->size) - S_DS_GEN - sizeof(*reg);
+
+       while (tlv_buf_left >= sizeof(*head)) {
+               head = (struct mwifiex_ie_types_header *)tlv_buf;
+               tlv = le16_to_cpu(head->type);
+               tlv_buf_len = le16_to_cpu(head->len);
+
+               if (tlv_buf_left < (sizeof(*head) + tlv_buf_len))
+                       break;
+
+               switch (tlv) {
+               case TLV_TYPE_CHAN_ATTR_CFG:
+                       mwifiex_dbg_dump(priv->adapter, CMD_D, "CHAN:",
+                                        (u8 *)head + sizeof(*head),
+                                        tlv_buf_len);
+                       priv->adapter->regd =
+                               mwifiex_create_custom_regdomain(priv,
+                                                               (u8 *)head +
+                                               sizeof(*head), tlv_buf_len);
+                       break;
+               }
+
+               tlv_buf += (sizeof(*head) + tlv_buf_len);
+               tlv_buf_left -= (sizeof(*head) + tlv_buf_len);
+       }
+
+       return 0;
+}
+
 /*
  * This function handles the command responses.
  *
@@ -1239,6 +1368,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                break;
        case HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG:
                break;
+       case HostCmd_CMD_CHAN_REGION_CFG:
+               ret = mwifiex_ret_chan_region_cfg(priv, resp);
+               break;
        default:
                mwifiex_dbg(adapter, ERROR,
                            "CMD_RESP: unknown cmd response %#x\n",
index a422f33..9df0c4d 100644 (file)
 #include "wmm.h"
 #include "11n.h"
 
+#define MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE    12
+
+static int mwifiex_check_ibss_peer_capabilties(struct mwifiex_private *priv,
+                                              struct mwifiex_sta_node *sta_ptr,
+                                              struct sk_buff *event)
+{
+       int evt_len, ele_len;
+       u8 *curr;
+       struct ieee_types_header *ele_hdr;
+       struct mwifiex_ie_types_mgmt_frame *tlv_mgmt_frame;
+       const struct ieee80211_ht_cap *ht_cap;
+       const struct ieee80211_vht_cap *vht_cap;
+
+       skb_pull(event, MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE);
+       evt_len = event->len;
+       curr = event->data;
+
+       mwifiex_dbg_dump(priv->adapter, EVT_D, "ibss peer capabilities:",
+                        event->data, event->len);
+
+       skb_push(event, MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE);
+
+       tlv_mgmt_frame = (void *)curr;
+       if (evt_len >= sizeof(*tlv_mgmt_frame) &&
+           le16_to_cpu(tlv_mgmt_frame->header.type) ==
+           TLV_TYPE_UAP_MGMT_FRAME) {
+               /* Advance curr past the TLV header and the 12-byte
+                * fixed beacon header: timestamp 8 bytes, beacon
+                * interval 2 bytes, capability info 2 bytes.
+                */
+               evt_len = le16_to_cpu(tlv_mgmt_frame->header.len);
+               curr += (sizeof(*tlv_mgmt_frame) + 12);
+       } else {
+               mwifiex_dbg(priv->adapter, MSG,
+                           "management frame tlv not found!\n");
+               return 0;
+       }
+
+       while (evt_len >= sizeof(*ele_hdr)) {
+               ele_hdr = (struct ieee_types_header *)curr;
+               ele_len = ele_hdr->len;
+
+               if (evt_len < ele_len + sizeof(*ele_hdr))
+                       break;
+
+               switch (ele_hdr->element_id) {
+               case WLAN_EID_HT_CAPABILITY:
+                       sta_ptr->is_11n_enabled = true;
+                       ht_cap = (void *)(ele_hdr + 2);
+                       sta_ptr->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
+                               IEEE80211_HT_CAP_MAX_AMSDU ?
+                               MWIFIEX_TX_DATA_BUF_SIZE_8K :
+                               MWIFIEX_TX_DATA_BUF_SIZE_4K;
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "11n enabled, max_amsdu: %d\n",
+                                   sta_ptr->max_amsdu);
+                       break;
+
+               case WLAN_EID_VHT_CAPABILITY:
+                       sta_ptr->is_11ac_enabled = true;
+                       vht_cap = (void *)(ele_hdr + 2);
+                       /* check VHT MAXMPDU capability */
+                       switch (le32_to_cpu(vht_cap->vht_cap_info) & 0x3) {
+                       case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
+                               sta_ptr->max_amsdu =
+                                       MWIFIEX_TX_DATA_BUF_SIZE_12K;
+                               break;
+                       case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
+                               sta_ptr->max_amsdu =
+                                       MWIFIEX_TX_DATA_BUF_SIZE_8K;
+                               break;
+                       case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895:
+                               sta_ptr->max_amsdu =
+                                       MWIFIEX_TX_DATA_BUF_SIZE_4K;
+                       default:
+                               break;
+                       }
+
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "11ac enabled, max_amsdu: %d\n",
+                                   sta_ptr->max_amsdu);
+                       break;
+               default:
+                       break;
+               }
+
+               curr += (ele_len + sizeof(*ele_hdr));
+               evt_len -= (ele_len + sizeof(*ele_hdr));
+       }
+
+       return 0;
+}
+
 /*
  * This function resets the connection state.
  *
@@ -519,6 +612,8 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
  *      - EVENT_LINK_QUALITY
  *      - EVENT_PRE_BEACON_LOST
  *      - EVENT_IBSS_COALESCED
+ *      - EVENT_IBSS_STA_CONNECT
+ *      - EVENT_IBSS_STA_DISCONNECT
  *      - EVENT_WEP_ICV_ERR
  *      - EVENT_BW_CHANGE
  *      - EVENT_HOSTWAKE_STAIE
@@ -547,9 +642,11 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
 int mwifiex_process_sta_event(struct mwifiex_private *priv)
 {
        struct mwifiex_adapter *adapter = priv->adapter;
-       int ret = 0;
+       int ret = 0, i;
        u32 eventcause = adapter->event_cause;
        u16 ctrl, reason_code;
+       u8 ibss_sta_addr[ETH_ALEN];
+       struct mwifiex_sta_node *sta_ptr;
 
        switch (eventcause) {
        case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
@@ -708,7 +805,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
        case EVENT_EXT_SCAN_REPORT:
                mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n");
-               if (adapter->ext_scan && !priv->scan_aborting)
+               /* We intend to skip this event during suspend, but still
+                * handle it when the interface is disabled.
+                */
+               if (adapter->ext_scan && (!priv->scan_aborting ||
+                                         !netif_running(priv->netdev)))
                        ret = mwifiex_handle_event_ext_scan_report(priv,
                                                adapter->event_skb->data);
 
@@ -771,6 +872,39 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
                                HostCmd_ACT_GEN_GET, 0, NULL, false);
                break;
+       case EVENT_IBSS_STA_CONNECT:
+               ether_addr_copy(ibss_sta_addr, adapter->event_body + 2);
+               mwifiex_dbg(adapter, EVENT, "event: IBSS_STA_CONNECT %pM\n",
+                           ibss_sta_addr);
+               sta_ptr = mwifiex_add_sta_entry(priv, ibss_sta_addr);
+               if (sta_ptr && adapter->adhoc_11n_enabled) {
+                       mwifiex_check_ibss_peer_capabilties(priv, sta_ptr,
+                                                           adapter->event_skb);
+                       if (sta_ptr->is_11n_enabled)
+                               for (i = 0; i < MAX_NUM_TID; i++)
+                                       sta_ptr->ampdu_sta[i] =
+                                       priv->aggr_prio_tbl[i].ampdu_user;
+                       else
+                               for (i = 0; i < MAX_NUM_TID; i++)
+                                       sta_ptr->ampdu_sta[i] =
+                                               BA_STREAM_NOT_ALLOWED;
+                       memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
+               }
+
+               break;
+       case EVENT_IBSS_STA_DISCONNECT:
+               ether_addr_copy(ibss_sta_addr, adapter->event_body + 2);
+               mwifiex_dbg(adapter, EVENT, "event: IBSS_STA_DISCONNECT %pM\n",
+                           ibss_sta_addr);
+               sta_ptr = mwifiex_get_sta_entry(priv, ibss_sta_addr);
+               if (sta_ptr && sta_ptr->is_11n_enabled) {
+                       mwifiex_11n_del_rx_reorder_tbl_by_ta(priv,
+                                                            ibss_sta_addr);
+                       mwifiex_del_tx_ba_stream_tbl_by_ra(priv, ibss_sta_addr);
+               }
+               mwifiex_wmm_del_peer_ra_list(priv, ibss_sta_addr);
+               mwifiex_del_sta_entry(priv, ibss_sta_addr);
+               break;
        case EVENT_ADDBA:
                mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n");
                mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
@@ -869,6 +1003,12 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                mwifiex_bt_coex_wlan_param_update_event(priv,
                                                        adapter->event_skb);
                break;
+       case EVENT_RXBA_SYNC:
+               dev_dbg(adapter->dev, "EVENT: RXBA_SYNC\n");
+               mwifiex_11n_rxba_sync_event(priv, adapter->event_body,
+                                           adapter->event_skb->len -
+                                           sizeof(eventcause));
+               break;
        default:
                mwifiex_dbg(adapter, ERROR, "event: unknown event id: %#x\n",
                            eventcause);
index e06647a..644f3a2 100644 (file)
@@ -574,7 +574,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
 
        adapter->hs_activate_wait_q_woken = false;
 
-       memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg));
+       memset(&hscfg, 0, sizeof(hscfg));
        hscfg.is_invoke_hostcmd = true;
 
        adapter->hs_enabling = true;
@@ -1138,7 +1138,7 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
 {
        struct mwifiex_ds_encrypt_key encrypt_key;
 
-       memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
+       memset(&encrypt_key, 0, sizeof(encrypt_key));
        encrypt_key.key_len = key_len;
        encrypt_key.key_index = key_index;
 
@@ -1180,7 +1180,7 @@ mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel)
 {
        struct mwifiex_ver_ext ver_ext;
 
-       memset(&ver_ext, 0, sizeof(struct host_cmd_ds_version_ext));
+       memset(&ver_ext, 0, sizeof(ver_ext));
        ver_ext.version_str_sel = version_str_sel;
        if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT,
                             HostCmd_ACT_GEN_GET, 0, &ver_ext, true))
index 86ff542..d24eca3 100644 (file)
@@ -306,7 +306,12 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
                mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
                mwifiex_process_multi_chan_event(priv, adapter->event_skb);
                break;
-
+       case EVENT_RXBA_SYNC:
+               dev_dbg(adapter->dev, "EVENT: RXBA_SYNC\n");
+               mwifiex_11n_rxba_sync_event(priv, adapter->event_body,
+                                           adapter->event_skb->len -
+                                           sizeof(eventcause));
+               break;
        default:
                mwifiex_dbg(adapter, EVENT,
                            "event: unknown event id: %#x\n", eventcause);
index 3bd04f5..8a20620 100644 (file)
@@ -611,7 +611,7 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
        if (!adapter->priv_num)
                return;
 
-       if (user_rmmod) {
+       if (user_rmmod && !adapter->mfg_mode) {
 #ifdef CONFIG_PM
                if (adapter->is_suspended)
                        mwifiex_usb_resume(intf);
@@ -1026,6 +1026,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                        dnld_cmd = le32_to_cpu(fwdata->fw_hdr.dnld_cmd);
                        tlen += sizeof(struct fw_header);
 
+                       /* Command 7 doesn't have a data length field */
+                       if (dnld_cmd == FW_CMD_7)
+                               dlen = 0;
+
                        memcpy(fwdata->data, &firmware[tlen], dlen);
 
                        fwdata->seq_num = cpu_to_le32(fw_seqnum);
index b4e9246..30e8eb8 100644 (file)
 #define USB8766_DEFAULT_FW_NAME        "mrvl/usb8766_uapsta.bin"
 #define USB8797_DEFAULT_FW_NAME        "mrvl/usb8797_uapsta.bin"
 #define USB8801_DEFAULT_FW_NAME        "mrvl/usb8801_uapsta.bin"
-#define USB8997_DEFAULT_FW_NAME        "mrvl/usb8997_uapsta.bin"
+#define USB8997_DEFAULT_FW_NAME        "mrvl/usbusb8997_combo_v4.bin"
 
 #define FW_DNLD_TX_BUF_SIZE    620
 #define FW_DNLD_RX_BUF_SIZE    2048
 #define FW_HAS_LAST_BLOCK      0x00000004
+#define FW_CMD_7               0x00000007
 
 #define FW_DATA_XMIT_SIZE \
        (sizeof(struct fw_header) + dlen + sizeof(u32))
index 6681be0..18fbb96 100644 (file)
@@ -386,6 +386,7 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len,
                                    "unknown public action frame category %d\n",
                                    category);
                }
+               break;
        default:
                mwifiex_dbg(priv->adapter, INFO,
                    "unknown mgmt frame subtype %#x\n", stype);
index 57a80cf..a8bc064 100644 (file)
@@ -103,7 +103,7 @@ static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
 
        if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
                dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
-       if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info)))
+       if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
                dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");
 
        trace_mt_rx(dev, rxwi, fce_info);
index 978e8a9..270d126 100644 (file)
@@ -18,8 +18,6 @@
 #include <asm/unaligned.h>
 #include <linux/skbuff.h>
 
-#include "util.h"
-
 #define MT_DMA_HDR_LEN                 4
 #define MT_RX_INFO_LEN                 4
 #define MT_FCE_INFO_LEN                        4
@@ -79,9 +77,9 @@ static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb,
         */
 
        info = flags |
-               MT76_SET(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
-               MT76_SET(MT_TXD_INFO_D_PORT, d_port) |
-               MT76_SET(MT_TXD_INFO_TYPE, type);
+               FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+               FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
+               FIELD_PREP(MT_TXD_INFO_TYPE, type);
 
        put_unaligned_le32(info, skb_push(skb, sizeof(info)));
        return skb_put_padto(skb, round_up(skb->len, 4) + 4);
@@ -90,7 +88,7 @@ static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb,
 static inline int
 mt7601u_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
 {
-       flags |= MT76_SET(MT_TXD_PKT_INFO_QSEL, qsel);
+       flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
        return mt7601u_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
 }
 
index 8d8ee03..da6faea 100644 (file)
@@ -45,8 +45,8 @@ mt7601u_efuse_read(struct mt7601u_dev *dev, u16 addr, u8 *data,
        val = mt76_rr(dev, MT_EFUSE_CTRL);
        val &= ~(MT_EFUSE_CTRL_AIN |
                 MT_EFUSE_CTRL_MODE);
-       val |= MT76_SET(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
-              MT76_SET(MT_EFUSE_CTRL_MODE, mode) |
+       val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
+              FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) |
               MT_EFUSE_CTRL_KICK;
        mt76_wr(dev, MT_EFUSE_CTRL, val);
 
@@ -128,8 +128,8 @@ mt7601u_set_chip_cap(struct mt7601u_dev *dev, u8 *eeprom)
        if (!field_valid(nic_conf0 >> 8))
                return;
 
-       if (MT76_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
-           MT76_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
+       if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
+           FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
                dev_err(dev->dev,
                        "Error: device has more than 1 RX/TX stream!\n");
 }
@@ -150,7 +150,7 @@ mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *eeprom)
 
        mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
        mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
-               MT76_SET(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+               FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
 
        return 0;
 }
@@ -176,7 +176,7 @@ mt7601u_set_channel_power(struct mt7601u_dev *dev, u8 *eeprom)
        u8 max_pwr;
 
        val = mt7601u_rr(dev, MT_TX_ALC_CFG_0);
-       max_pwr = MT76_GET(MT_TX_ALC_CFG_0_LIMIT_0, val);
+       max_pwr = FIELD_GET(MT_TX_ALC_CFG_0_LIMIT_0, val);
 
        if (mt7601u_has_tssi(dev, eeprom)) {
                mt7601u_set_channel_target_power(dev, eeprom, max_pwr);
index 8fa78d7..44d46e2 100644 (file)
@@ -108,8 +108,9 @@ static void mt7601u_init_usb_dma(struct mt7601u_dev *dev)
 {
        u32 val;
 
-       val = MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
-             MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) |
+       val = FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
+             FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT,
+                        MT_USB_AGGR_SIZE_LIMIT) |
              MT_USB_DMA_CFG_RX_BULK_EN |
              MT_USB_DMA_CFG_TX_BULK_EN;
        if (dev->in_max_packet == 512)
@@ -396,8 +397,9 @@ int mt7601u_init_hardware(struct mt7601u_dev *dev)
 
        mt7601u_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
 
-       mt7601u_wr(dev, MT_TXOP_CTRL_CFG, MT76_SET(MT_TXOP_TRUN_EN, 0x3f) |
-                                         MT76_SET(MT_TXOP_EXT_CCA_DLY, 0x58));
+       mt7601u_wr(dev, MT_TXOP_CTRL_CFG,
+                  FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
+                  FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
 
        ret = mt7601u_eeprom_init(dev);
        if (ret)
index e21c53e..3c57639 100644 (file)
 static void
 mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate)
 {
-       u8 idx = MT76_GET(MT_TXWI_RATE_MCS, rate);
+       u8 idx = FIELD_GET(MT_TXWI_RATE_MCS, rate);
 
        txrate->idx = 0;
        txrate->flags = 0;
        txrate->count = 1;
 
-       switch (MT76_GET(MT_TXWI_RATE_PHY_MODE, rate)) {
+       switch (FIELD_GET(MT_TXWI_RATE_PHY_MODE, rate)) {
        case MT_PHY_TYPE_OFDM:
                txrate->idx = idx + 4;
                return;
@@ -47,7 +47,7 @@ mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate)
                return;
        }
 
-       if (MT76_GET(MT_TXWI_RATE_BW, rate) == MT_PHY_BW_40)
+       if (FIELD_GET(MT_TXWI_RATE_BW, rate) == MT_PHY_BW_40)
                txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
 
        if (rate & MT_TXWI_RATE_SGI)
@@ -125,9 +125,9 @@ u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
                bw = 0;
        }
 
-       rateval = MT76_SET(MT_RXWI_RATE_MCS, rate_idx);
-       rateval |= MT76_SET(MT_RXWI_RATE_PHY, phy);
-       rateval |= MT76_SET(MT_RXWI_RATE_BW, bw);
+       rateval = FIELD_PREP(MT_RXWI_RATE_MCS, rate_idx);
+       rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+       rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
        if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
                rateval |= MT_RXWI_RATE_SGI;
 
@@ -156,9 +156,9 @@ struct mt76_tx_status mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev)
        stat.success = !!(val & MT_TX_STAT_FIFO_SUCCESS);
        stat.aggr = !!(val & MT_TX_STAT_FIFO_AGGR);
        stat.ack_req = !!(val & MT_TX_STAT_FIFO_ACKREQ);
-       stat.pktid = MT76_GET(MT_TX_STAT_FIFO_PID_TYPE, val);
-       stat.wcid = MT76_GET(MT_TX_STAT_FIFO_WCID, val);
-       stat.rate = MT76_GET(MT_TX_STAT_FIFO_RATE, val);
+       stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_PID_TYPE, val);
+       stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, val);
+       stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, val);
 
        return stat;
 }
@@ -270,7 +270,7 @@ void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval)
        }
 
        val &= ~MT_BEACON_TIME_CFG_INTVAL;
-       val |= MT76_SET(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
+       val |= FIELD_PREP(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
                MT_BEACON_TIME_CFG_TIMER_EN |
                MT_BEACON_TIME_CFG_SYNC_MODE |
                MT_BEACON_TIME_CFG_TBTT_EN;
@@ -349,8 +349,8 @@ mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
        u8 zmac[ETH_ALEN] = {};
        u32 attr;
 
-       attr = MT76_SET(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
-              MT76_SET(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+       attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+              FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
 
        mt76_wr(dev, MT_WCID_ATTR(idx), attr);
 
@@ -382,15 +382,15 @@ void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev)
        rcu_read_unlock();
 
        mt7601u_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
-                  MT76_SET(MT_MAX_LEN_CFG_AMPDU, min_factor));
+                  FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
 }
 
 static void
 mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
 {
-       u8 idx = MT76_GET(MT_RXWI_RATE_MCS, rate);
+       u8 idx = FIELD_GET(MT_RXWI_RATE_MCS, rate);
 
-       switch (MT76_GET(MT_RXWI_RATE_PHY, rate)) {
+       switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
        case MT_PHY_TYPE_OFDM:
                if (WARN_ON(idx >= 8))
                        idx = 0;
@@ -436,7 +436,7 @@ mt7601u_rx_monitor_beacon(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
                          u16 rate, int rssi)
 {
        dev->bcn_freq_off = rxwi->freq_off;
-       dev->bcn_phy_mode = MT76_GET(MT_RXWI_RATE_PHY, rate);
+       dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate);
        dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8);
 }
 
@@ -458,7 +458,7 @@ u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
        u16 rate = le16_to_cpu(rxwi->rate);
        int rssi;
 
-       len = MT76_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+       len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
        if (len < 10)
                return 0;
 
@@ -542,8 +542,8 @@ int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
 
        val = mt7601u_rr(dev, MT_WCID_ATTR(idx));
        val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
-       val |= MT76_SET(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
-              MT76_SET(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
+       val |= FIELD_PREP(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
+              FIELD_PREP(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
        val &= ~MT_WCID_ATTR_PAIRWISE;
        val |= MT_WCID_ATTR_PAIRWISE *
                !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
index e70dd95..43ebd46 100644 (file)
@@ -15,7 +15,6 @@
 #include "mt7601u.h"
 #include "mac.h"
 #include <linux/etherdevice.h>
-#include <linux/version.h>
 
 static int mt7601u_start(struct ieee80211_hw *hw)
 {
index 91c4b34..dbdfb3f 100644 (file)
@@ -43,8 +43,8 @@ static inline void mt7601u_dma_skb_wrap_cmd(struct sk_buff *skb,
                                            u8 seq, enum mcu_cmd cmd)
 {
        WARN_ON(mt7601u_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
-                                    MT76_SET(MT_TXD_CMD_INFO_SEQ, seq) |
-                                    MT76_SET(MT_TXD_CMD_INFO_TYPE, cmd)));
+                                    FIELD_PREP(MT_TXD_CMD_INFO_SEQ, seq) |
+                                    FIELD_PREP(MT_TXD_CMD_INFO_TYPE, cmd)));
 }
 
 static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev,
@@ -100,13 +100,13 @@ static int mt7601u_mcu_wait_resp(struct mt7601u_dev *dev, u8 seq)
                        dev_err(dev->dev, "Error: MCU resp urb failed:%d\n",
                                urb_status);
 
-               if (MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
-                   MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
+               if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
+                   FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
                        return 0;
 
-               dev_err(dev->dev, "Error: MCU resp evt:%hhx seq:%hhx-%hhx!\n",
-                       MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
-                       seq, MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
+               dev_err(dev->dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n",
+                       FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
+                       seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
        }
 
        dev_err(dev->dev, "Error: %s timed out\n", __func__);
@@ -291,9 +291,9 @@ static int __mt7601u_dma_fw(struct mt7601u_dev *dev,
        u32 val;
        int ret;
 
-       reg = cpu_to_le32(MT76_SET(MT_TXD_INFO_TYPE, DMA_PACKET) |
-                         MT76_SET(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
-                         MT76_SET(MT_TXD_INFO_LEN, len));
+       reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_PACKET) |
+                         FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
+                         FIELD_PREP(MT_TXD_INFO_LEN, len));
        memcpy(buf.buf, &reg, sizeof(reg));
        memcpy(buf.buf + sizeof(reg), data, len);
        memset(buf.buf + sizeof(reg) + len, 0, 8);
index 428bd2f..c7ec404 100644 (file)
@@ -15,6 +15,7 @@
 #ifndef MT7601U_H
 #define MT7601U_H
 
+#include <linux/bitfield.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
 #include <linux/mutex.h>
@@ -24,7 +25,6 @@
 #include <linux/debugfs.h>
 
 #include "regs.h"
-#include "util.h"
 
 #define MT_CALIBRATE_INTERVAL          (4 * HZ)
 
@@ -299,7 +299,7 @@ bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
 
 /* Compatibility with mt76 */
 #define mt76_rmw_field(_dev, _reg, _field, _val)       \
-       mt76_rmw(_dev, _reg, _field, MT76_SET(_field, _val))
+       mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
 
 static inline u32 mt76_rr(struct mt7601u_dev *dev, u32 offset)
 {
index 1908af6..ca09a5d 100644 (file)
@@ -41,11 +41,12 @@ mt7601u_rf_wr(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 value)
                goto out;
        }
 
-       mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_DATA, value) |
-                                      MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) |
-                                      MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) |
-                                      MT_RF_CSR_CFG_WR |
-                                      MT_RF_CSR_CFG_KICK);
+       mt7601u_wr(dev, MT_RF_CSR_CFG,
+                  FIELD_PREP(MT_RF_CSR_CFG_DATA, value) |
+                  FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+                  FIELD_PREP(MT_RF_CSR_CFG_REG_ID, offset) |
+                  MT_RF_CSR_CFG_WR |
+                  MT_RF_CSR_CFG_KICK);
        trace_rf_write(dev, bank, offset, value);
 out:
        mutex_unlock(&dev->reg_atomic_mutex);
@@ -74,17 +75,18 @@ mt7601u_rf_rr(struct mt7601u_dev *dev, u8 bank, u8 offset)
        if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
                goto out;
 
-       mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) |
-                                      MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) |
-                                      MT_RF_CSR_CFG_KICK);
+       mt7601u_wr(dev, MT_RF_CSR_CFG,
+                  FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+                  FIELD_PREP(MT_RF_CSR_CFG_REG_ID, offset) |
+                  MT_RF_CSR_CFG_KICK);
 
        if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
                goto out;
 
        val = mt7601u_rr(dev, MT_RF_CSR_CFG);
-       if (MT76_GET(MT_RF_CSR_CFG_REG_ID, val) == offset &&
-           MT76_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
-               ret = MT76_GET(MT_RF_CSR_CFG_DATA, val);
+       if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == offset &&
+           FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
+               ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val);
                trace_rf_read(dev, bank, offset, ret);
        }
 out:
@@ -139,8 +141,8 @@ static void mt7601u_bbp_wr(struct mt7601u_dev *dev, u8 offset, u8 val)
        }
 
        mt7601u_wr(dev, MT_BBP_CSR_CFG,
-                  MT76_SET(MT_BBP_CSR_CFG_VAL, val) |
-                  MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) |
+                  FIELD_PREP(MT_BBP_CSR_CFG_VAL, val) |
+                  FIELD_PREP(MT_BBP_CSR_CFG_REG_NUM, offset) |
                   MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY);
        trace_bbp_write(dev, offset, val);
 out:
@@ -163,7 +165,7 @@ static int mt7601u_bbp_rr(struct mt7601u_dev *dev, u8 offset)
                goto out;
 
        mt7601u_wr(dev, MT_BBP_CSR_CFG,
-                  MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) |
+                  FIELD_PREP(MT_BBP_CSR_CFG_REG_NUM, offset) |
                   MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY |
                   MT_BBP_CSR_CFG_READ);
 
@@ -171,8 +173,8 @@ static int mt7601u_bbp_rr(struct mt7601u_dev *dev, u8 offset)
                goto out;
 
        val = mt7601u_rr(dev, MT_BBP_CSR_CFG);
-       if (MT76_GET(MT_BBP_CSR_CFG_REG_NUM, val) == offset) {
-               ret = MT76_GET(MT_BBP_CSR_CFG_VAL, val);
+       if (FIELD_GET(MT_BBP_CSR_CFG_REG_NUM, val) == offset) {
+               ret = FIELD_GET(MT_BBP_CSR_CFG_VAL, val);
                trace_bbp_read(dev, offset, ret);
        }
 out:
@@ -249,9 +251,9 @@ int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
                        /* bw40 */ { -2, 16, 34 }
                }
        };
-       int bw = MT76_GET(MT_RXWI_RATE_BW, rate);
-       int aux_lna = MT76_GET(MT_RXWI_ANT_AUX_LNA, rxwi->ant);
-       int lna_id = MT76_GET(MT_RXWI_GAIN_RSSI_LNA_ID, rxwi->gain);
+       int bw = FIELD_GET(MT_RXWI_RATE_BW, rate);
+       int aux_lna = FIELD_GET(MT_RXWI_ANT_AUX_LNA, rxwi->ant);
+       int lna_id = FIELD_GET(MT_RXWI_GAIN_RSSI_LNA_ID, rxwi->gain);
        int val;
 
        if (lna_id) /* LNA id can be 0, 2, 3. */
@@ -259,7 +261,7 @@ int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
 
        val = 8;
        val -= lna[aux_lna][bw][lna_id];
-       val -= MT76_GET(MT_RXWI_GAIN_RSSI_VAL, rxwi->gain);
+       val -= FIELD_GET(MT_RXWI_GAIN_RSSI_VAL, rxwi->gain);
        val -= dev->ee->lna_gain;
        val -= dev->ee->rssi_offset[0];
 
@@ -939,7 +941,7 @@ static int mt7601u_tssi_cal(struct mt7601u_dev *dev)
        dev_dbg(dev->dev, "final diff: %08x\n", diff_pwr);
 
        val = mt7601u_rr(dev, MT_TX_ALC_CFG_1);
-       curr_pwr = s6_to_int(MT76_GET(MT_TX_ALC_CFG_1_TEMP_COMP, val));
+       curr_pwr = s6_to_int(FIELD_GET(MT_TX_ALC_CFG_1_TEMP_COMP, val));
        diff_pwr += curr_pwr;
        val = (val & ~MT_TX_ALC_CFG_1_TEMP_COMP) | int_to_s6(diff_pwr);
        mt7601u_wr(dev, MT_TX_ALC_CFG_1, val);
index afd8978..27a429d 100644 (file)
 
 #include <linux/bitops.h>
 
-#ifndef GENMASK
-#define GENMASK(h, l)       (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
-#endif
-
 #define MT_ASIC_VERSION                        0x0000
 
 #define MT76XX_REV_E3          0x22
index a0a33dc..ad77bec 100644 (file)
@@ -175,11 +175,12 @@ mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
                ba_size = min_t(int, 63, ba_size);
                if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
                        ba_size = 0;
-               txwi->ack_ctl |= MT76_SET(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+               txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
 
-               txwi->flags = cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
-                                         MT76_SET(MT_TXWI_FLAGS_MPDU_DENSITY,
-                                                  sta->ht_cap.ampdu_density));
+               txwi->flags =
+                       cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
+                                   FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+                                              sta->ht_cap.ampdu_density));
                if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
                        txwi->flags = 0;
        }
@@ -188,7 +189,7 @@ mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
 
        is_probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
        pkt_id = mt7601u_tx_pktid_enc(dev, rate_ctl & 0x7, is_probe);
-       pkt_len |= MT76_SET(MT_TXWI_LEN_PKTID, pkt_id);
+       pkt_len |= FIELD_PREP(MT_TXWI_LEN_PKTID, pkt_id);
        txwi->len_ctl = cpu_to_le16(pkt_len);
 
        return txwi;
@@ -285,9 +286,9 @@ int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        WARN_ON(cw_min > 0xf);
        WARN_ON(cw_max > 0xf);
 
-       val = MT76_SET(MT_EDCA_CFG_AIFSN, params->aifs) |
-             MT76_SET(MT_EDCA_CFG_CWMIN, cw_min) |
-             MT76_SET(MT_EDCA_CFG_CWMAX, cw_max);
+       val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+             FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+             FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
        /* TODO: based on user-controlled EnableTxBurst var vendor drv sets
         *       a really long txop on AC0 (see connect.c:2009) but only on
         *       connect? When not connected should be 0.
@@ -295,7 +296,7 @@ int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        if (!hw_q)
                val |= 0x60;
        else
-               val |= MT76_SET(MT_EDCA_CFG_TXOP, params->txop);
+               val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop);
        mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
 
        val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
diff --git a/drivers/net/wireless/mediatek/mt7601u/util.h b/drivers/net/wireless/mediatek/mt7601u/util.h
deleted file mode 100644 (file)
index b89140b..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MT76_UTIL_H
-#define __MT76_UTIL_H
-
-/*
- * Power of two check, this will check
- * if the mask that has been given contains and contiguous set of bits.
- * Note that we cannot use the is_power_of_2() function since this
- * check must be done at compile-time.
- */
-#define is_power_of_two(x)     ( !((x) & ((x)-1)) )
-#define low_bit_mask(x)                ( ((x)-1) & ~(x) )
-#define is_valid_mask(x)       is_power_of_two(1LU + (x) + low_bit_mask(x))
-
-/*
- * Macros to find first set bit in a variable.
- * These macros behave the same as the __ffs() functions but
- * the most important difference that this is done during
- * compile-time rather then run-time.
- */
-#define compile_ffs2(__x) \
-       __builtin_choose_expr(((__x) & 0x1), 0, 1)
-
-#define compile_ffs4(__x) \
-       __builtin_choose_expr(((__x) & 0x3), \
-                             (compile_ffs2((__x))), \
-                             (compile_ffs2((__x) >> 2) + 2))
-
-#define compile_ffs8(__x) \
-       __builtin_choose_expr(((__x) & 0xf), \
-                             (compile_ffs4((__x))), \
-                             (compile_ffs4((__x) >> 4) + 4))
-
-#define compile_ffs16(__x) \
-       __builtin_choose_expr(((__x) & 0xff), \
-                             (compile_ffs8((__x))), \
-                             (compile_ffs8((__x) >> 8) + 8))
-
-#define compile_ffs32(__x) \
-       __builtin_choose_expr(((__x) & 0xffff), \
-                             (compile_ffs16((__x))), \
-                             (compile_ffs16((__x) >> 16) + 16))
-
-/*
- * This macro will check the requirements for the FIELD{8,16,32} macros
- * The mask should be a constant non-zero contiguous set of bits which
- * does not exceed the given typelimit.
- */
-#define FIELD_CHECK(__mask) \
-       BUILD_BUG_ON(!(__mask) || !is_valid_mask(__mask))
-
-#define MT76_SET(_mask, _val)                                          \
-       ({                                                              \
-               FIELD_CHECK(_mask);                                     \
-               (((u32) (_val)) << compile_ffs32(_mask)) & _mask;       \
-       })
-
-#define MT76_GET(_mask, _val)                                          \
-       ({                                                              \
-               FIELD_CHECK(_mask);                                     \
-               (u32) (((_val) & _mask) >> compile_ffs32(_mask));       \
-       })
-
-#endif
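
The util.h removal above is possible because <linux/bitfield.h> provides equivalent, compile-time-checked field helpers, so every MT76_SET()/MT76_GET() call in the mt7601u hunks becomes FIELD_PREP()/FIELD_GET(). A minimal sketch of how the helpers are used, with a hypothetical register field rather than a real mt7601u mask:

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/types.h>

/* Hypothetical 32-bit register: bits 7:0 hold a length, bits 11:8 a type. */
#define DEMO_REG_LEN	GENMASK(7, 0)
#define DEMO_REG_TYPE	GENMASK(11, 8)

static u32 demo_pack(u32 len, u32 type)
{
	/* FIELD_PREP() shifts each value into the bit position of its mask. */
	return FIELD_PREP(DEMO_REG_LEN, len) |
	       FIELD_PREP(DEMO_REG_TYPE, type);
}

static u32 demo_unpack_type(u32 reg)
{
	/* FIELD_GET() masks the register and right-aligns the field. */
	return FIELD_GET(DEMO_REG_TYPE, reg);
}

Because the masks are plain GENMASK() constants, FIELD_PREP() can verify at build time that a constant value fits its field, which replaces the driver-private FIELD_CHECK()/compile_ffs32() machinery deleted above.
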
index 7cf26c6..6005e14 100644 (file)
@@ -831,8 +831,10 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
        rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
                                        sizeof(struct usb_anchor),
                                        GFP_KERNEL);
-       if (!rt2x00dev->anchor)
+       if (!rt2x00dev->anchor) {
+               retval = -ENOMEM;
                goto exit_free_reg;
+       }
 
        init_usb_anchor(rt2x00dev->anchor);
        return 0;
index 4341d56..1f54b89 100644 (file)
@@ -43,6 +43,7 @@
 
 #define TX_TOTAL_PAGE_NUM              0xf8
 #define TX_TOTAL_PAGE_NUM_8192E                0xf3
+#define TX_TOTAL_PAGE_NUM_8723B                0xf7
 /* (HPQ + LPQ + NPQ + PUBQ) = TX_TOTAL_PAGE_NUM */
 #define TX_PAGE_NUM_PUBQ               0xe7
 #define TX_PAGE_NUM_HI_PQ              0x0c
 #define TX_PAGE_NUM_LO_PQ_8192E                0x0c
 #define TX_PAGE_NUM_NORM_PQ_8192E      0x00
 
+#define TX_PAGE_NUM_PUBQ_8723B         0xe7
+#define TX_PAGE_NUM_HI_PQ_8723B                0x0c
+#define TX_PAGE_NUM_LO_PQ_8723B                0x02
+#define TX_PAGE_NUM_NORM_PQ_8723B      0x02
+
 #define RTL_FW_PAGE_SIZE               4096
 #define RTL8XXXU_FIRMWARE_POLL_MAX     1000
 
@@ -1330,11 +1336,17 @@ struct rtl8xxxu_fileops {
                                  u32 ramask, int sgi);
        void (*report_connect) (struct rtl8xxxu_priv *priv,
                                u8 macid, bool connect);
+       void (*fill_txdesc) (struct ieee80211_hdr *hdr,
+                            struct rtl8xxxu_txdesc32 *tx_desc, u32 rate,
+                            u16 rate_flag, bool sgi, bool short_preamble,
+                            bool ampdu_enable);
        int writeN_block_size;
        int rx_agg_buf_size;
        char tx_desc_size;
        char rx_desc_size;
-       char has_s0s1;
+       u8 has_s0s1:1;
+       u8 has_tx_report:1;
+       u8 gen2_thermal_meter:1;
        u32 adda_1t_init;
        u32 adda_1t_path_on;
        u32 adda_2t_path_on_a;
@@ -1421,6 +1433,14 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
 int rtl8xxxu_gen2_channel_to_group(int channel);
 bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
                                      int result[][8], int c1, int c2);
+void rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr,
+                            struct rtl8xxxu_txdesc32 *tx_desc, u32 rate,
+                            u16 rate_flag, bool sgi, bool short_preamble,
+                            bool ampdu_enable);
+void rtl8xxxu_fill_txdesc_v2(struct ieee80211_hdr *hdr,
+                            struct rtl8xxxu_txdesc32 *tx_desc32, u32 rate,
+                            u16 rate_flag, bool sgi, bool short_preamble,
+                            bool ampdu_enable);
 
 extern struct rtl8xxxu_fileops rtl8192cu_fops;
 extern struct rtl8xxxu_fileops rtl8192eu_fops;
index 69d1a14..f9e2050 100644 (file)
@@ -567,6 +567,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
        .set_tx_power = rtl8xxxu_gen1_set_tx_power,
        .update_rate_mask = rtl8xxxu_update_rate_mask,
        .report_connect = rtl8xxxu_gen1_report_connect,
+       .fill_txdesc = rtl8xxxu_fill_txdesc_v1,
        .writeN_block_size = 128,
        .rx_agg_buf_size = 16000,
        .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
@@ -579,5 +580,9 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
        .pbp_rx = PBP_PAGE_SIZE_128,
        .pbp_tx = PBP_PAGE_SIZE_128,
        .mactable = rtl8xxxu_gen1_mac_init_table,
+       .total_page_num = TX_TOTAL_PAGE_NUM,
+       .page_num_hi = TX_PAGE_NUM_HI_PQ,
+       .page_num_lo = TX_PAGE_NUM_LO_PQ,
+       .page_num_norm = TX_PAGE_NUM_NORM_PQ,
 };
 #endif
index 9a1994f..841522e 100644 (file)
@@ -1501,10 +1501,12 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
        .set_tx_power = rtl8192e_set_tx_power,
        .update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
        .report_connect = rtl8xxxu_gen2_report_connect,
+       .fill_txdesc = rtl8xxxu_fill_txdesc_v2,
        .writeN_block_size = 128,
        .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
        .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
        .has_s0s1 = 0,
+       .gen2_thermal_meter = 1,
        .adda_1t_init = 0x0fc01616,
        .adda_1t_path_on = 0x0fc01616,
        .adda_2t_path_on_a = 0x0fc01616,
index 686c551..aef3730 100644 (file)
@@ -384,6 +384,7 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
        .set_tx_power = rtl8xxxu_gen1_set_tx_power,
        .update_rate_mask = rtl8xxxu_update_rate_mask,
        .report_connect = rtl8xxxu_gen1_report_connect,
+       .fill_txdesc = rtl8xxxu_fill_txdesc_v1,
        .writeN_block_size = 1024,
        .rx_agg_buf_size = 16000,
        .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
@@ -396,4 +397,8 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
        .pbp_rx = PBP_PAGE_SIZE_128,
        .pbp_tx = PBP_PAGE_SIZE_128,
        .mactable = rtl8xxxu_gen1_mac_init_table,
+       .total_page_num = TX_TOTAL_PAGE_NUM,
+       .page_num_hi = TX_PAGE_NUM_HI_PQ,
+       .page_num_lo = TX_PAGE_NUM_LO_PQ,
+       .page_num_norm = TX_PAGE_NUM_NORM_PQ,
 };
index 9d45afb..6c086b5 100644 (file)
@@ -1662,10 +1662,13 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
        .set_tx_power = rtl8723b_set_tx_power,
        .update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
        .report_connect = rtl8xxxu_gen2_report_connect,
+       .fill_txdesc = rtl8xxxu_fill_txdesc_v2,
        .writeN_block_size = 1024,
        .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
        .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
        .has_s0s1 = 1,
+       .has_tx_report = 1,
+       .gen2_thermal_meter = 1,
        .adda_1t_init = 0x01c00014,
        .adda_1t_path_on = 0x01c00014,
        .adda_2t_path_on_a = 0x01c00014,
@@ -1674,4 +1677,8 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
        .pbp_rx = PBP_PAGE_SIZE_256,
        .pbp_tx = PBP_PAGE_SIZE_256,
        .mactable = rtl8723b_mac_init_table,
+       .total_page_num = TX_TOTAL_PAGE_NUM_8723B,
+       .page_num_hi = TX_PAGE_NUM_HI_PQ_8723B,
+       .page_num_lo = TX_PAGE_NUM_LO_PQ_8723B,
+       .page_num_norm = TX_PAGE_NUM_NORM_PQ_8723B,
 };
index 77048db..ca92022 100644 (file)
@@ -894,7 +894,7 @@ int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
        return retval;
 }
 
-int
+static int
 rtl8xxxu_gen1_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c, int len)
 {
        struct device *dev = &priv->udev->dev;
@@ -3847,28 +3847,6 @@ void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv)
        rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
 }
 
-static void rtl8xxxu_old_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
-{
-       u8 val8;
-       u32 val32;
-
-       if (priv->ep_tx_normal_queue)
-               val8 = TX_PAGE_NUM_NORM_PQ;
-       else
-               val8 = 0;
-
-       rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8);
-
-       val32 = (TX_PAGE_NUM_PUBQ << RQPN_PUB_PQ_SHIFT) | RQPN_LOAD;
-
-       if (priv->ep_tx_high_queue)
-               val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT);
-       if (priv->ep_tx_low_queue)
-               val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT);
-
-       rtl8xxxu_write32(priv, REG_RQPN, val32);
-}
-
 static void rtl8xxxu_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
 {
        struct rtl8xxxu_fileops *fops = priv->fops;
@@ -3929,12 +3907,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
                goto exit;
        }
 
-       if (!macpower) {
-               if (priv->fops->total_page_num)
-                       rtl8xxxu_init_queue_reserved_page(priv);
-               else
-                       rtl8xxxu_old_init_queue_reserved_page(priv);
-       }
+       if (!macpower)
+               rtl8xxxu_init_queue_reserved_page(priv);
 
        ret = rtl8xxxu_init_queue_priority(priv);
        dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret);
@@ -3947,11 +3921,11 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
        rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, priv->fops->trxff_boundary);
 
        ret = rtl8xxxu_download_firmware(priv);
-       dev_dbg(dev, "%s: download_fiwmare %i\n", __func__, ret);
+       dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret);
        if (ret)
                goto exit;
        ret = rtl8xxxu_start_firmware(priv);
-       dev_dbg(dev, "%s: start_fiwmare %i\n", __func__, ret);
+       dev_dbg(dev, "%s: start_firmware %i\n", __func__, ret);
        if (ret)
                goto exit;
 
@@ -3994,13 +3968,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
                /*
                 * Set TX buffer boundary
                 */
-               if (priv->rtl_chip == RTL8192E)
-                       val8 = TX_TOTAL_PAGE_NUM_8192E + 1;
-               else
-                       val8 = TX_TOTAL_PAGE_NUM + 1;
-
-               if (priv->rtl_chip == RTL8723B)
-                       val8 -= 1;
+               val8 = priv->fops->total_page_num + 1;
 
                rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8);
                rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8);
@@ -4032,10 +4000,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
                priv->fops->usb_quirks(priv);
 
                /*
-                * Presumably this is for 8188EU as well
-                * Enable TX report and TX report timer
+                * Enable TX report and TX report timer for 8723bu/8188eu/...
                 */
-               if (priv->rtl_chip == RTL8723B) {
+               if (priv->fops->has_tx_report) {
                        val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
                        val8 |= TX_REPORT_CTRL_TIMER_ENABLE;
                        rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
@@ -4228,7 +4195,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
        /*
         * This should enable thermal meter
         */
-       if (priv->fops->tx_desc_size == sizeof(struct rtl8xxxu_txdesc40))
+       if (priv->fops->gen2_thermal_meter)
                rtl8xxxu_write_rfreg(priv,
                                     RF_A, RF6052_REG_T_METER_8723B, 0x37cf8);
        else
@@ -4783,6 +4750,113 @@ static void rtl8xxxu_dump_action(struct device *dev,
        }
 }
 
+/*
+ * Fill in v1 (gen1) specific TX descriptor bits.
+ * This format is used on 8188cu/8192cu/8723au
+ */
+void
+rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr,
+                       struct rtl8xxxu_txdesc32 *tx_desc, u32 rate,
+                       u16 rate_flag, bool sgi, bool short_preamble,
+                       bool ampdu_enable)
+{
+       u16 seq_number;
+
+       seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+       tx_desc->txdw5 = cpu_to_le32(rate);
+
+       if (ieee80211_is_data(hdr->frame_control))
+               tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
+
+       tx_desc->txdw3 = cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT);
+
+       if (ampdu_enable)
+               tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_ENABLE);
+       else
+               tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_BREAK);
+
+       if (ieee80211_is_mgmt(hdr->frame_control)) {
+               tx_desc->txdw5 = cpu_to_le32(rate);
+               tx_desc->txdw4 |= cpu_to_le32(TXDESC32_USE_DRIVER_RATE);
+               tx_desc->txdw5 |= cpu_to_le32(6 << TXDESC32_RETRY_LIMIT_SHIFT);
+               tx_desc->txdw5 |= cpu_to_le32(TXDESC32_RETRY_LIMIT_ENABLE);
+       }
+
+       if (ieee80211_is_data_qos(hdr->frame_control))
+               tx_desc->txdw4 |= cpu_to_le32(TXDESC32_QOS);
+
+       if (short_preamble)
+               tx_desc->txdw4 |= cpu_to_le32(TXDESC32_SHORT_PREAMBLE);
+
+       if (sgi)
+               tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI);
+
+       if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
+               /*
+                * Use RTS rate 24M - does the mac80211 tell
+                * us which to use?
+                */
+               tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M <<
+                                             TXDESC32_RTS_RATE_SHIFT);
+               tx_desc->txdw4 |= cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
+               tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
+       }
+}
+
+/*
+ * Fill in v2 (gen2) specific TX descriptor bits.
+ * This format is used on 8192eu/8723bu
+ */
+void
+rtl8xxxu_fill_txdesc_v2(struct ieee80211_hdr *hdr,
+                       struct rtl8xxxu_txdesc32 *tx_desc32, u32 rate,
+                       u16 rate_flag, bool sgi, bool short_preamble,
+                       bool ampdu_enable)
+{
+       struct rtl8xxxu_txdesc40 *tx_desc40;
+       u16 seq_number;
+
+       tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc32;
+
+       seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+       tx_desc40->txdw4 = cpu_to_le32(rate);
+       if (ieee80211_is_data(hdr->frame_control)) {
+               tx_desc40->txdw4 |= cpu_to_le32(0x1f <<
+                                               TXDESC40_DATA_RATE_FB_SHIFT);
+       }
+
+       tx_desc40->txdw9 = cpu_to_le32((u32)seq_number << TXDESC40_SEQ_SHIFT);
+
+       if (ampdu_enable)
+               tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE);
+       else
+               tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK);
+
+       if (ieee80211_is_mgmt(hdr->frame_control)) {
+               tx_desc40->txdw4 = cpu_to_le32(rate);
+               tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_USE_DRIVER_RATE);
+               tx_desc40->txdw4 |=
+                       cpu_to_le32(6 << TXDESC40_RETRY_LIMIT_SHIFT);
+               tx_desc40->txdw4 |= cpu_to_le32(TXDESC40_RETRY_LIMIT_ENABLE);
+       }
+
+       if (short_preamble)
+               tx_desc40->txdw5 |= cpu_to_le32(TXDESC40_SHORT_PREAMBLE);
+
+       if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
+               /*
+                * Use RTS rate 24M - does the mac80211 tell
+                * us which to use?
+                */
+               tx_desc40->txdw4 |= cpu_to_le32(DESC_RATE_24M <<
+                                               TXDESC40_RTS_RATE_SHIFT);
+               tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE);
+               tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE);
+       }
+}
+
 static void rtl8xxxu_tx(struct ieee80211_hw *hw,
                        struct ieee80211_tx_control *control,
                        struct sk_buff *skb)
@@ -4792,7 +4866,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
        struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
        struct rtl8xxxu_priv *priv = hw->priv;
        struct rtl8xxxu_txdesc32 *tx_desc;
-       struct rtl8xxxu_txdesc40 *tx_desc40;
        struct rtl8xxxu_tx_urb *tx_urb;
        struct ieee80211_sta *sta = NULL;
        struct ieee80211_vif *vif = tx_info->control.vif;
@@ -4803,7 +4876,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
        u16 rate_flag = tx_info->control.rates[0].flags;
        int tx_desc_size = priv->fops->tx_desc_size;
        int ret;
-       bool usedesc40, ampdu_enable;
+       bool usedesc40, ampdu_enable, sgi = false, short_preamble = false;
 
        if (skb_headroom(skb) < tx_desc_size) {
                dev_warn(dev,
@@ -4881,107 +4954,26 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
                }
        }
 
-       if (rate_flag & IEEE80211_TX_RC_MCS)
+       if (rate_flag & IEEE80211_TX_RC_MCS &&
+           !ieee80211_is_mgmt(hdr->frame_control))
                rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
        else
                rate = tx_rate->hw_value;
 
-       seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
-       if (!usedesc40) {
-               tx_desc->txdw5 = cpu_to_le32(rate);
-
-               if (ieee80211_is_data(hdr->frame_control))
-                       tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
-
-               tx_desc->txdw3 =
-                       cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT);
-
-               if (ampdu_enable)
-                       tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_ENABLE);
-               else
-                       tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_BREAK);
-
-               if (ieee80211_is_mgmt(hdr->frame_control)) {
-                       tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value);
-                       tx_desc->txdw4 |=
-                               cpu_to_le32(TXDESC32_USE_DRIVER_RATE);
-                       tx_desc->txdw5 |=
-                               cpu_to_le32(6 << TXDESC32_RETRY_LIMIT_SHIFT);
-                       tx_desc->txdw5 |=
-                               cpu_to_le32(TXDESC32_RETRY_LIMIT_ENABLE);
-               }
-
-               if (ieee80211_is_data_qos(hdr->frame_control))
-                       tx_desc->txdw4 |= cpu_to_le32(TXDESC32_QOS);
+       if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
+           (ieee80211_is_data_qos(hdr->frame_control) &&
+            sta && sta->ht_cap.cap &
+            (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20)))
+               sgi = true;
 
-               if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
-                   (sta && vif && vif->bss_conf.use_short_preamble))
-                       tx_desc->txdw4 |= cpu_to_le32(TXDESC32_SHORT_PREAMBLE);
+       if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
+           (sta && vif && vif->bss_conf.use_short_preamble))
+               short_preamble = true;
 
-               if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
-                   (ieee80211_is_data_qos(hdr->frame_control) &&
-                    sta && sta->ht_cap.cap &
-                    (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) {
-                       tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI);
-               }
-
-               if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
-                       /*
-                        * Use RTS rate 24M - does the mac80211 tell
-                        * us which to use?
-                        */
-                       tx_desc->txdw4 |=
-                               cpu_to_le32(DESC_RATE_24M <<
-                                           TXDESC32_RTS_RATE_SHIFT);
-                       tx_desc->txdw4 |=
-                               cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
-                       tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
-               }
-       } else {
-               tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc;
-
-               tx_desc40->txdw4 = cpu_to_le32(rate);
-               if (ieee80211_is_data(hdr->frame_control)) {
-                       tx_desc->txdw4 |=
-                               cpu_to_le32(0x1f <<
-                                           TXDESC40_DATA_RATE_FB_SHIFT);
-               }
-
-               tx_desc40->txdw9 =
-                       cpu_to_le32((u32)seq_number << TXDESC40_SEQ_SHIFT);
-
-               if (ampdu_enable)
-                       tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE);
-               else
-                       tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK);
-
-               if (ieee80211_is_mgmt(hdr->frame_control)) {
-                       tx_desc40->txdw4 = cpu_to_le32(tx_rate->hw_value);
-                       tx_desc40->txdw3 |=
-                               cpu_to_le32(TXDESC40_USE_DRIVER_RATE);
-                       tx_desc40->txdw4 |=
-                               cpu_to_le32(6 << TXDESC40_RETRY_LIMIT_SHIFT);
-                       tx_desc40->txdw4 |=
-                               cpu_to_le32(TXDESC40_RETRY_LIMIT_ENABLE);
-               }
-
-               if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
-                   (sta && vif && vif->bss_conf.use_short_preamble))
-                       tx_desc40->txdw5 |=
-                               cpu_to_le32(TXDESC40_SHORT_PREAMBLE);
+       seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
 
-               if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
-                       /*
-                        * Use RTS rate 24M - does the mac80211 tell
-                        * us which to use?
-                        */
-                       tx_desc->txdw4 |=
-                               cpu_to_le32(DESC_RATE_24M <<
-                                           TXDESC40_RTS_RATE_SHIFT);
-                       tx_desc->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE);
-                       tx_desc->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE);
-               }
-       }
+       priv->fops->fill_txdesc(hdr, tx_desc, rate, rate_flag,
+                               sgi, short_preamble, ampdu_enable);
 
        rtl8xxxu_calc_tx_desc_csum(tx_desc);
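The hunk above is the payoff of the new fill_txdesc hook: the per-generation descriptor layout differences move behind the rtl8xxxu_fileops table, so the TX hot path computes rate, sgi and short_preamble once and then makes a single indirect call instead of branching on usedesc40. A minimal standalone sketch of that ops-table dispatch pattern (illustration only — struct chip_ops, fill_txdesc_gen1/gen2 and struct txdesc are invented for the example, not driver symbols):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct txdesc {
	uint32_t rate_word;	/* stand-in for the txdw* rate fields */
	bool aggregated;
};

/* Per-generation descriptor fillers, analogous to rtl8xxxu_fill_txdesc_v1/v2. */
static void fill_txdesc_gen1(struct txdesc *d, uint32_t rate, bool ampdu)
{
	d->rate_word = rate;
	d->aggregated = ampdu;
}

static void fill_txdesc_gen2(struct txdesc *d, uint32_t rate, bool ampdu)
{
	d->rate_word = rate | (0x1f << 8);	/* the gen2 format differs, e.g. extra fallback-rate bits */
	d->aggregated = ampdu;
}

/* Per-chip ops table: one indirect call replaces chip checks in the hot path. */
struct chip_ops {
	const char *name;
	void (*fill_txdesc)(struct txdesc *d, uint32_t rate, bool ampdu);
};

static const struct chip_ops gen1_ops = { "gen1", fill_txdesc_gen1 };
static const struct chip_ops gen2_ops = { "gen2", fill_txdesc_gen2 };

static void tx_path(const struct chip_ops *ops, uint32_t rate, bool ampdu)
{
	struct txdesc desc = { 0 };

	ops->fill_txdesc(&desc, rate, ampdu);	/* single dispatch point */
	printf("%s: rate_word=%#x ampdu=%d\n", ops->name,
	       (unsigned int)desc.rate_word, desc.aggregated);
}

int main(void)
{
	tx_path(&gen1_ops, 0x0b, true);
	tx_path(&gen2_ops, 0x0b, false);
	return 0;
}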
 
@@ -5704,7 +5696,7 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 
        switch (action) {
        case IEEE80211_AMPDU_TX_START:
-               dev_info(dev, "%s: IEEE80211_AMPDU_TX_START\n", __func__);
+               dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_START\n", __func__);
                ampdu_factor = sta->ht_cap.ampdu_factor;
                ampdu_density = sta->ht_cap.ampdu_density;
                rtl8xxxu_set_ampdu_factor(priv, ampdu_factor);
@@ -5714,21 +5706,21 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        ampdu_factor, ampdu_density);
                break;
        case IEEE80211_AMPDU_TX_STOP_FLUSH:
-               dev_info(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH\n", __func__);
+               dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH\n", __func__);
                rtl8xxxu_set_ampdu_factor(priv, 0);
                rtl8xxxu_set_ampdu_min_space(priv, 0);
                break;
        case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
-               dev_info(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH_CONT\n",
+               dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH_CONT\n",
                         __func__);
                rtl8xxxu_set_ampdu_factor(priv, 0);
                rtl8xxxu_set_ampdu_min_space(priv, 0);
                break;
        case IEEE80211_AMPDU_RX_START:
-               dev_info(dev, "%s: IEEE80211_AMPDU_RX_START\n", __func__);
+               dev_dbg(dev, "%s: IEEE80211_AMPDU_RX_START\n", __func__);
                break;
        case IEEE80211_AMPDU_RX_STOP:
-               dev_info(dev, "%s: IEEE80211_AMPDU_RX_STOP\n", __func__);
+               dev_dbg(dev, "%s: IEEE80211_AMPDU_RX_STOP\n", __func__);
                break;
        default:
                break;
@@ -5947,7 +5939,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
        struct ieee80211_hw *hw;
        struct usb_device *udev;
        struct ieee80211_supported_band *sband;
-       int ret = 0;
+       int ret;
        int untested = 1;
 
        udev = usb_get_dev(interface_to_usbdev(interface));
@@ -5971,6 +5963,18 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
                if (id->idProduct == 0x1004)
                        untested = 0;
                break;
+       case 0x20f4:
+               if (id->idProduct == 0x648b)
+                       untested = 0;
+               break;
+       case 0x2001:
+               if (id->idProduct == 0x3308)
+                       untested = 0;
+               break;
+       case 0x2357:
+               if (id->idProduct == 0x0109)
+                       untested = 0;
+               break;
        default:
                break;
        }
@@ -5987,6 +5991,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
        hw = ieee80211_alloc_hw(sizeof(struct rtl8xxxu_priv), &rtl8xxxu_ops);
        if (!hw) {
                ret = -ENOMEM;
+               priv = NULL;
                goto exit;
        }
 
@@ -6035,6 +6040,8 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
        }
 
        ret = rtl8xxxu_init_device(hw);
+       if (ret)
+               goto exit;
 
        hw->wiphy->max_scan_ssids = 1;
        hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
@@ -6085,9 +6092,20 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
                goto exit;
        }
 
+       return 0;
+
 exit:
-       if (ret < 0)
-               usb_put_dev(udev);
+       usb_set_intfdata(interface, NULL);
+
+       if (priv) {
+               kfree(priv->fw_data);
+               mutex_destroy(&priv->usb_buf_mutex);
+               mutex_destroy(&priv->h2c_mutex);
+       }
+       usb_put_dev(udev);
+
+       ieee80211_free_hw(hw);
+
        return ret;
 }
 
@@ -6111,6 +6129,11 @@ static void rtl8xxxu_disconnect(struct usb_interface *interface)
        mutex_destroy(&priv->usb_buf_mutex);
        mutex_destroy(&priv->h2c_mutex);
 
+       if (priv->udev->state != USB_STATE_NOTATTACHED) {
+               dev_info(&priv->udev->dev,
+                        "Device still attached, trying to reset\n");
+               usb_reset_device(priv->udev);
+       }
        usb_put_dev(priv->udev);
        ieee80211_free_hw(hw);
 }
@@ -6124,6 +6147,9 @@ static struct usb_device_id dev_table[] = {
        .driver_info = (unsigned long)&rtl8723au_fops},
 {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818b, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8192eu_fops},
+/* Tested by Myckel Habets */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0109, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192eu_fops},
 {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0xb720, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8723bu_fops},
 #ifdef CONFIG_RTL8XXXU_UNTESTED
@@ -6140,6 +6166,12 @@ static struct usb_device_id dev_table[] = {
 /* Tested by Andrea Merello */
 {USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8192cu_fops},
+/* Tested by Jocelyn Mayer */
+{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x648b, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
+/* Tested by Stefano Bravi */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3308, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192cu_fops},
 /* Currently untested 8188 series devices */
 {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8191, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8192cu_fops},
@@ -6187,8 +6219,6 @@ static struct usb_device_id dev_table[] = {
        .driver_info = (unsigned long)&rtl8192cu_fops},
 {USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3357, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8192cu_fops},
-{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3308, 0xff, 0xff, 0xff),
-       .driver_info = (unsigned long)&rtl8192cu_fops},
 {USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330b, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8192cu_fops},
 {USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x4902, 0xff, 0xff, 0xff),
@@ -6199,8 +6229,6 @@ static struct usb_device_id dev_table[] = {
        .driver_info = (unsigned long)&rtl8192cu_fops},
 {USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xed17, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8192cu_fops},
-{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x648b, 0xff, 0xff, 0xff),
-       .driver_info = (unsigned long)&rtl8192cu_fops},
 {USB_DEVICE_AND_INTERFACE_INFO(0x4855, 0x0090, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8192cu_fops},
 {USB_DEVICE_AND_INTERFACE_INFO(0x4856, 0x0091, 0xff, 0xff, 0xff),
index 921c565..3555a2f 100644 (file)
 #define REG_HMBOX_EXT_1                        0x008a
 #define REG_HMBOX_EXT_2                        0x008c
 #define REG_HMBOX_EXT_3                        0x008e
+
 /* Interrupt registers for 8192e/8723bu/8812 */
 #define REG_HIMR0                      0x00b0
+#define         IMR0_TXCCK                     BIT(30) /* TXRPT interrupt when CCX bit
+                                                  of the packet is set */
+#define         IMR0_PSTIMEOUT                 BIT(29) /* Power Save Time Out Int */
+#define         IMR0_GTINT4                    BIT(28) /* Set when GTIMER4 expires */
+#define         IMR0_GTINT3                    BIT(27) /* Set when GTIMER3 expires */
+#define         IMR0_TBDER                     BIT(26) /* Transmit Beacon0 Error */
+#define         IMR0_TBDOK                     BIT(25) /* Transmit Beacon0 OK */
+#define         IMR0_TSF_BIT32_TOGGLE          BIT(24) /* TSF Timer BIT32 toggle
+                                                  indication interrupt */
+#define         IMR0_BCNDMAINT0                BIT(20) /* Beacon DMA Interrupt 0 */
+#define         IMR0_BCNDERR0                  BIT(16) /* Beacon Queue DMA Error 0 */
+#define         IMR0_HSISR_IND_ON_INT          BIT(15) /* HSISR Indicator (HSIMR &
+                                                  HSISR is true) */
+#define         IMR0_BCNDMAINT_E               BIT(14) /* Beacon DMA Interrupt
+                                                  Extension for Win7 */
+#define         IMR0_ATIMEND                   BIT(12) /* CTWindow End or
+                                                  ATIM Window End */
+#define         IMR0_HISR1_IND_INT             BIT(11) /* HISR1 Indicator
+                                                  (HISR1 & HIMR1 is true) */
+#define         IMR0_C2HCMD                    BIT(10) /* CPU to Host Command INT
+                                                  Status, Write 1 to clear */
+#define         IMR0_CPWM2                     BIT(9)  /* CPU power Mode exchange INT
+                                                  Status, Write 1 to clear */
+#define         IMR0_CPWM                      BIT(8)  /* CPU power Mode exchange INT
+                                                  Status, Write 1 to clear */
+#define         IMR0_HIGHDOK                   BIT(7)  /* High Queue DMA OK */
+#define         IMR0_MGNTDOK                   BIT(6)  /* Management Queue DMA OK */
+#define         IMR0_BKDOK                     BIT(5)  /* AC_BK DMA OK */
+#define         IMR0_BEDOK                     BIT(4)  /* AC_BE DMA OK */
+#define         IMR0_VIDOK                     BIT(3)  /* AC_VI DMA OK */
+#define         IMR0_VODOK                     BIT(2)  /* AC_VO DMA OK */
+#define         IMR0_RDU                       BIT(1)  /* Rx Descriptor Unavailable */
+#define         IMR0_ROK                       BIT(0)  /* Receive DMA OK */
 #define REG_HISR0                      0x00b4
 #define REG_HIMR1                      0x00b8
+#define         IMR1_BCNDMAINT7                BIT(27) /* Beacon DMA Interrupt 7 */
+#define         IMR1_BCNDMAINT6                BIT(26) /* Beacon DMA Interrupt 6 */
+#define         IMR1_BCNDMAINT5                BIT(25) /* Beacon DMA Interrupt 5 */
+#define         IMR1_BCNDMAINT4                BIT(24) /* Beacon DMA Interrupt 4 */
+#define         IMR1_BCNDMAINT3                BIT(23) /* Beacon DMA Interrupt 3 */
+#define         IMR1_BCNDMAINT2                BIT(22) /* Beacon DMA Interrupt 2 */
+#define         IMR1_BCNDMAINT1                BIT(21) /* Beacon DMA Interrupt 1 */
+#define         IMR1_BCNDERR7                  BIT(20) /* Beacon Queue DMA Err Int 7 */
+#define         IMR1_BCNDERR6                  BIT(19) /* Beacon Queue DMA Err Int 6 */
+#define         IMR1_BCNDERR5                  BIT(18) /* Beacon Queue DMA Err Int 5 */
+#define         IMR1_BCNDERR4                  BIT(17) /* Beacon Queue DMA Err Int 4 */
+#define         IMR1_BCNDERR3                  BIT(16) /* Beacon Queue DMA Err Int 3 */
+#define         IMR1_BCNDERR2                  BIT(15) /* Beacon Queue DMA Err Int 2 */
+#define         IMR1_BCNDERR1                  BIT(14) /* Beacon Queue DMA Err Int 1 */
+#define         IMR1_ATIMEND_E                 BIT(13) /* ATIM Window End Extension
+                                                  for Win7 */
+#define         IMR1_TXERR                     BIT(11) /* Tx Error Flag Int Status,
+                                                  write 1 to clear */
+#define         IMR1_RXERR                     BIT(10) /* Rx Error Flag Int Status,
+                                                  write 1 to clear */
+#define         IMR1_TXFOVW                    BIT(9)  /* Transmit FIFO Overflow */
+#define         IMR1_RXFOVW                    BIT(8)  /* Receive FIFO Overflow */
 #define REG_HISR1                      0x00bc
 
 /*  Host suspend counter on FPGA platform */
 #define  FPGA_RF_MODE_OFDM             BIT(25)
 
 #define REG_FPGA0_TX_INFO              0x0804
+#define  FPGA0_TX_INFO_OFDM_PATH_A     BIT(0)
+#define  FPGA0_TX_INFO_OFDM_PATH_B     BIT(1)
+#define  FPGA0_TX_INFO_OFDM_PATH_C     BIT(2)
+#define  FPGA0_TX_INFO_OFDM_PATH_D     BIT(3)
 #define REG_FPGA0_PSD_FUNC             0x0808
 #define REG_FPGA0_TX_GAIN              0x080c
 #define REG_FPGA0_RF_TIMING1           0x0810
index 41f77f8..7aee5eb 100644 (file)
@@ -1135,7 +1135,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                                        mac->mode = WIRELESS_MODE_AC_24G;
                        }
 
-                       if (vif->type == NL80211_IFTYPE_STATION && sta)
+                       if (vif->type == NL80211_IFTYPE_STATION)
                                rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
                        rcu_read_unlock();
 
index 3524441..6ee6bf8 100644 (file)
@@ -345,9 +345,9 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
                return &rtl_regdom_no_midband;
        case COUNTRY_CODE_IC:
                return &rtl_regdom_11;
-       case COUNTRY_CODE_ETSI:
        case COUNTRY_CODE_TELEC_NETGEAR:
                return &rtl_regdom_60_64;
+       case COUNTRY_CODE_ETSI:
        case COUNTRY_CODE_SPAIN:
        case COUNTRY_CODE_FRANCE:
        case COUNTRY_CODE_ISRAEL:
@@ -406,6 +406,8 @@ static u8 channel_plan_to_country_code(u8 channelplan)
                return COUNTRY_CODE_WORLD_WIDE_13;
        case 0x22:
                return COUNTRY_CODE_IC;
+       case 0x25:
+               return COUNTRY_CODE_ETSI;
        case 0x32:
                return COUNTRY_CODE_TELEC_NETGEAR;
        case 0x41:
index 47e32cb..e7b11b4 100644 (file)
@@ -280,7 +280,7 @@ static struct rtl_mod_params rtl88ee_mod_params = {
        .debug = DBG_EMERG,
 };
 
-static struct rtl_hal_cfg rtl88ee_hal_cfg = {
+static const struct rtl_hal_cfg rtl88ee_hal_cfg = {
        .bar_id = 2,
        .write_readback = true,
        .name = "rtl88e_pci",
index 4780bdc..87aa209 100644 (file)
@@ -258,7 +258,7 @@ static struct rtl_mod_params rtl92ce_mod_params = {
        .debug = DBG_EMERG,
 };
 
-static struct rtl_hal_cfg rtl92ce_hal_cfg = {
+static const struct rtl_hal_cfg rtl92ce_hal_cfg = {
        .bar_id = 2,
        .write_readback = true,
        .name = "rtl92c_pci",
index b0f6324..5720551 100644 (file)
@@ -1757,7 +1757,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
                return;
 
        if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params))
-               return;
+               goto exit;
 
        _rtl92de_efuse_update_chip_version(hw);
        _rtl92de_read_macphymode_and_bandtype(hw, hwinfo);
@@ -1790,6 +1790,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
                break;
        }
        rtlefuse->txpwr_fromeprom = true;
+exit:
        kfree(hwinfo);
 }
 
index d334d2a..2a4810d 100644 (file)
@@ -588,7 +588,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
                                 * setting. */
                                udelay(1);
                                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                                        "The Rtl819XAGCTAB_Array_Table[0] is %ul Rtl819XPHY_REGArray[1] is %ul\n",
+                                        "The Rtl819XAGCTAB_Array_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
                                         agctab_array_table[i],
                                         agctab_array_table[i + 1]);
                        }
@@ -604,7 +604,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
                                         * setting. */
                                        udelay(1);
                                        RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                                                "The Rtl819XAGCTAB_Array_Table[0] is %ul Rtl819XPHY_REGArray[1] is %ul\n",
+                                                "The Rtl819XAGCTAB_Array_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
                                                 agctab_array_table[i],
                                                 agctab_array_table[i + 1]);
                                }
@@ -620,7 +620,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
                                         * setting. */
                                        udelay(1);
                                        RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                                                "The Rtl819XAGCTAB_5GArray_Table[0] is %ul Rtl819XPHY_REGArray[1] is %ul\n",
+                                                "The Rtl819XAGCTAB_5GArray_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
                                                 agctab_5garray_table[i],
                                                 agctab_5garray_table[i + 1]);
                                }
index c6e09a1..0538a4d 100644 (file)
@@ -262,7 +262,7 @@ static struct rtl_mod_params rtl92de_mod_params = {
        .debug = DBG_EMERG,
 };
 
-static struct rtl_hal_cfg rtl92de_hal_cfg = {
+static const struct rtl_hal_cfg rtl92de_hal_cfg = {
        .bar_id = 2,
        .write_readback = true,
        .name = "rtl8192de",
index c31c6bf..ac299cb 100644 (file)
@@ -262,7 +262,7 @@ static struct rtl_mod_params rtl92ee_mod_params = {
        .debug = DBG_EMERG,
 };
 
-static struct rtl_hal_cfg rtl92ee_hal_cfg = {
+static const struct rtl_hal_cfg rtl92ee_hal_cfg = {
        .bar_id = 2,
        .write_readback = true,
        .name = "rtl92ee_pci",
index 31baca4..5e8e02d 100644 (file)
@@ -306,7 +306,7 @@ static struct rtl_mod_params rtl92se_mod_params = {
 
 /* Because memory R/W bursting will cause system hang/crash
  * for 92se, so we don't read back after every write action */
-static struct rtl_hal_cfg rtl92se_hal_cfg = {
+static const struct rtl_hal_cfg rtl92se_hal_cfg = {
        .bar_id = 1,
        .write_readback = false,
        .name = "rtl92s_pci",
index b88c7ee..ba30efc 100644 (file)
@@ -1654,7 +1654,7 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw,
                        rtlefuse->autoload_failflag, hwinfo);
 
        if (rtlhal->oem_id != RT_CID_DEFAULT)
-               return;
+               goto exit;
 
        switch (rtlefuse->eeprom_oemid) {
        case EEPROM_CID_DEFAULT:
index ff49a8c..89c828a 100644 (file)
@@ -276,7 +276,7 @@ static struct rtl_mod_params rtl8723e_mod_params = {
        .disable_watchdog = false,
 };
 
-static struct rtl_hal_cfg rtl8723e_hal_cfg = {
+static const struct rtl_hal_cfg rtl8723e_hal_cfg = {
        .bar_id = 2,
        .write_readback = true,
        .name = "rtl8723e_pci",
index 2101793..20b53f0 100644 (file)
@@ -276,7 +276,7 @@ static struct rtl_mod_params rtl8723be_mod_params = {
        .ant_sel = 0,
 };
 
-static struct rtl_hal_cfg rtl8723be_hal_cfg = {
+static const struct rtl_hal_cfg rtl8723be_hal_cfg = {
        .bar_id = 2,
        .write_readback = true,
        .name = "rtl8723be_pci",
index 4159f9b..22f687b 100644 (file)
@@ -316,7 +316,7 @@ static struct rtl_mod_params rtl8821ae_mod_params = {
        .disable_watchdog = 0,
 };
 
-static struct rtl_hal_cfg rtl8821ae_hal_cfg = {
+static const struct rtl_hal_cfg rtl8821ae_hal_cfg = {
        .bar_id = 2,
        .write_readback = true,
        .name = "rtl8821ae_pci",
index 4be0409..b5525a3 100644 (file)
@@ -309,3 +309,32 @@ out:
        kfree(acx);
        return ret;
 }
+
+int wl18xx_acx_time_sync_cfg(struct wl1271 *wl)
+{
+       struct acx_time_sync_cfg *acx;
+       int ret;
+
+       wl1271_debug(DEBUG_ACX, "acx time sync cfg: mode %d, addr: %pM",
+                    wl->conf.sg.params[WL18XX_CONF_SG_TIME_SYNC],
+                    wl->zone_master_mac_addr);
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       acx->sync_mode = wl->conf.sg.params[WL18XX_CONF_SG_TIME_SYNC];
+       memcpy(acx->zone_mac_addr, wl->zone_master_mac_addr, ETH_ALEN);
+
+       ret = wl1271_cmd_configure(wl, ACX_TIME_SYNC_CFG,
+                                  acx, sizeof(*acx));
+       if (ret < 0) {
+               wl1271_warning("acx time sync cfg failed: %d", ret);
+               goto out;
+       }
+out:
+       kfree(acx);
+       return ret;
+}
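The new wl18xx_acx_time_sync_cfg() above follows the driver's usual ACX shape: allocate the command struct zeroed, fill it from the configuration, hand it to wl1271_cmd_configure(), and release it on a single exit path (the goto immediately before the out label is redundant but harmless). A standalone userspace sketch of that allocate/send/free pattern (illustration only — send_cmd() and configure_time_sync() are invented stand-ins, not wlcore APIs):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

struct time_sync_cfg {		/* stand-in for the firmware command payload */
	unsigned char sync_mode;
	unsigned char zone_mac_addr[6];
};

/* Stand-in for the command-configure call; pretend the firmware accepted it. */
static int send_cmd(const void *buf, size_t len)
{
	printf("sending %zu-byte command, mode %u\n",
	       len, ((const unsigned char *)buf)[0]);
	return 0;
}

static int configure_time_sync(unsigned char mode, const unsigned char *mac)
{
	struct time_sync_cfg *cfg;
	int ret;

	cfg = calloc(1, sizeof(*cfg));	/* kzalloc() analogue */
	if (!cfg) {
		ret = -ENOMEM;
		goto out;		/* nothing allocated, but keep one exit path */
	}

	cfg->sync_mode = mode;
	memcpy(cfg->zone_mac_addr, mac, sizeof(cfg->zone_mac_addr));

	ret = send_cmd(cfg, sizeof(*cfg));
	if (ret < 0)
		fprintf(stderr, "time sync cfg failed: %d\n", ret);
out:
	free(cfg);			/* free(NULL) is a no-op, like kfree() */
	return ret;
}

int main(void)
{
	static const unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	return configure_time_sync(1, mac) ? 1 : 0;
}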
index 342a299..2edbbbf 100644 (file)
@@ -37,6 +37,7 @@ enum {
        ACX_RX_BA_FILTER                 = 0x0058,
        ACX_AP_SLEEP_CFG                 = 0x0059,
        ACX_DYNAMIC_TRACES_CFG           = 0x005A,
+       ACX_TIME_SYNC_CFG                = 0x005B,
 };
 
 /* numbers of bits the length field takes (add 1 for the actual number) */
@@ -388,6 +389,17 @@ struct acx_dynamic_fw_traces_cfg {
        __le32 dynamic_fw_traces;
 } __packed;
 
+/*
+ * ACX_TIME_SYNC_CFG
+ * configure the time sync parameters
+ */
+struct acx_time_sync_cfg {
+       struct acx_header header;
+       u8 sync_mode;
+       u8 zone_mac_addr[ETH_ALEN];
+       u8 padding[1];
+} __packed;
+
 int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
                                  u32 sdio_blk_size, u32 extra_mem_blks,
                                  u32 len_field_size);
@@ -402,5 +414,6 @@ int wl18xx_acx_interrupt_notify_config(struct wl1271 *wl, bool action);
 int wl18xx_acx_rx_ba_filter(struct wl1271 *wl, bool action);
 int wl18xx_acx_ap_sleep(struct wl1271 *wl);
 int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl);
+int wl18xx_acx_time_sync_cfg(struct wl1271 *wl);
 
 #endif /* __WL18XX_ACX_H__ */
index 2c5df43..b36ce18 100644 (file)
@@ -22,6 +22,7 @@
 #include <net/genetlink.h>
 #include "event.h"
 #include "scan.h"
+#include "conf.h"
 #include "../wlcore/cmd.h"
 #include "../wlcore/debug.h"
 #include "../wlcore/vendor_cmd.h"
index 9e1f2d9..ef6c15b 100644 (file)
@@ -4986,7 +4986,6 @@ static int wl12xx_sta_add(struct wl1271 *wl,
                return ret;
 
        wl_sta = (struct wl1271_station *)sta->drv_priv;
-       wl_sta->wl = wl;
        hlid = wl_sta->hlid;
 
        ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
index 6d24040..0ed526e 100644 (file)
@@ -84,7 +84,7 @@ struct wilink_familiy_data {
        char name[8];
 };
 
-const struct wilink_familiy_data *wilink_data;
+static const struct wilink_familiy_data *wilink_data;
 
 static const struct wilink_familiy_data wl18xx_data = {
        .name = "wl18xx",
index 8f28aa0..1827546 100644 (file)
@@ -501,6 +501,9 @@ struct wl1271 {
 
        /* dynamic fw traces */
        u32 dynamic_fw_traces;
+
+       /* time sync zone master */
+       u8 zone_master_mac_addr[ETH_ALEN];
 };
 
 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
index 242b4e3..0277ae5 100644 (file)
@@ -347,7 +347,6 @@ struct wl1271_station {
         * Used in both AP and STA mode.
         */
        u64 total_freed_pkts;
-       struct wl1271 *wl;
 };
 
 struct wl12xx_vif {
index 82d94f8..932f3f8 100644 (file)
@@ -1258,7 +1258,9 @@ static int wl3501_reset(struct net_device *dev)
 {
        struct wl3501_card *this = netdev_priv(dev);
        int rc = -ENODEV;
+       unsigned long flags;
 
+       spin_lock_irqsave(&this->lock, flags);
        wl3501_block_interrupt(this);
 
        if (wl3501_init_firmware(this)) {
@@ -1280,20 +1282,17 @@ static int wl3501_reset(struct net_device *dev)
        pr_debug("%s: device reset", dev->name);
        rc = 0;
 out:
+       spin_unlock_irqrestore(&this->lock, flags);
        return rc;
 }
 
 static void wl3501_tx_timeout(struct net_device *dev)
 {
-       struct wl3501_card *this = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
-       unsigned long flags;
        int rc;
 
        stats->tx_errors++;
-       spin_lock_irqsave(&this->lock, flags);
        rc = wl3501_reset(dev);
-       spin_unlock_irqrestore(&this->lock, flags);
        if (rc)
                printk(KERN_ERR "%s: Error %d resetting card on Tx timeout!\n",
                       dev->name, rc);
index a912dc0..c5effd6 100644 (file)
@@ -193,7 +193,7 @@ static int upload_code(struct usb_device *udev,
                        0, 0, p, sizeof(ret), 5000 /* ms */);
                if (r != sizeof(ret)) {
                        dev_err(&udev->dev,
-                               "control request firmeware confirmation failed."
+                               "control request firmware confirmation failed."
                                " Return value %d\n", r);
                        if (r >= 0)
                                r = -ENODEV;
index 458daf9..935866f 100644 (file)
@@ -185,8 +185,12 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
                return -ENXIO;
 
        nd_desc = nvdimm_bus->nd_desc;
+       /*
+        * if ndctl does not exist, it's PMEM_LEGACY and
+        * we want to just pretend everything is handled.
+        */
        if (!nd_desc->ndctl)
-               return -ENXIO;
+               return len;
 
        memset(&ars_cap, 0, sizeof(ars_cap));
        ars_cap.address = phys;
index db39d53..f7d37a6 100644 (file)
@@ -30,8 +30,8 @@ config NVME_FABRICS
 
 config NVME_RDMA
        tristate "NVM Express over Fabrics RDMA host driver"
-       depends on INFINIBAND
-       depends on BLK_DEV_NVME
+       depends on INFINIBAND && BLOCK
+       select NVME_CORE
        select NVME_FABRICS
        select SG_POOL
        help
index dc99676..4eff491 100644 (file)
@@ -47,8 +47,10 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
 
        mutex_lock(&nvmf_hosts_mutex);
        host = __nvmf_host_find(hostnqn);
-       if (host)
+       if (host) {
+               kref_get(&host->ref);
                goto out_unlock;
+       }
 
        host = kmalloc(sizeof(*host), GFP_KERNEL);
        if (!host)
@@ -56,7 +58,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
 
        kref_init(&host->ref);
        memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
-       uuid_le_gen(&host->id);
+       uuid_be_gen(&host->id);
 
        list_add_tail(&host->list, &nvmf_hosts);
 out_unlock:
@@ -73,9 +75,9 @@ static struct nvmf_host *nvmf_host_default(void)
                return NULL;
 
        kref_init(&host->ref);
-       uuid_le_gen(&host->id);
+       uuid_be_gen(&host->id);
        snprintf(host->nqn, NVMF_NQN_SIZE,
-               "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUl", &host->id);
+               "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
 
        mutex_lock(&nvmf_hosts_mutex);
        list_add_tail(&host->list, &nvmf_hosts);
@@ -363,7 +365,14 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
        cmd.connect.opcode = nvme_fabrics_command;
        cmd.connect.fctype = nvme_fabrics_type_connect;
        cmd.connect.qid = 0;
-       cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
+
+       /*
+        * The fabrics spec sets a minimum depth of 32 for the admin
+        * queue, so always use this depth until there is justification
+        * to do otherwise.
+        */
+       cmd.connect.sqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+
        /*
         * Set keep-alive timeout in seconds granularity (ms * 1000)
         * and add a grace period for controller kato enforcement
@@ -375,7 +384,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
        if (!data)
                return -ENOMEM;
 
-       memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le));
+       memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
        data->cntlid = cpu_to_le16(0xffff);
        strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
        strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
@@ -434,7 +443,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
        if (!data)
                return -ENOMEM;
 
-       memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le));
+       memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
        data->cntlid = cpu_to_le16(ctrl->cntlid);
        strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
        strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
index 89df52c..46e460a 100644 (file)
@@ -34,7 +34,7 @@ struct nvmf_host {
        struct kref             ref;
        struct list_head        list;
        char                    nqn[NVMF_NQN_SIZE];
-       uuid_le                 id;
+       uuid_be                 id;
 };
 
 /**
index 8d2875b..ab545fb 100644 (file)
 
 #define NVME_RDMA_MAX_INLINE_SEGMENTS  1
 
-#define NVME_RDMA_MAX_PAGES_PER_MR     512
-
-#define NVME_RDMA_DEF_RECONNECT_DELAY  20
-
 /*
  * We handle AEN commands ourselves and don't even let the
  * block layer know about them.
@@ -77,7 +73,6 @@ struct nvme_rdma_request {
        u32                     num_sge;
        int                     nents;
        bool                    inline_data;
-       bool                    need_inval;
        struct ib_reg_wr        reg_wr;
        struct ib_cqe           reg_cqe;
        struct nvme_rdma_queue  *queue;
@@ -286,7 +281,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
        int ret = 0;
 
-       if (!req->need_inval)
+       if (!req->mr->need_inval)
                goto out;
 
        ib_dereg_mr(req->mr);
@@ -298,7 +293,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
                req->mr = NULL;
        }
 
-       req->need_inval = false;
+       req->mr->need_inval = false;
 
 out:
        return ret;
@@ -645,7 +640,8 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
        int i, ret;
 
        for (i = 1; i < ctrl->queue_count; i++) {
-               ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+               ret = nvme_rdma_init_queue(ctrl, i,
+                                          ctrl->ctrl.opts->queue_size);
                if (ret) {
                        dev_info(ctrl->ctrl.device,
                                "failed to initialize i/o queue: %d\n", ret);
@@ -849,7 +845,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
        if (!blk_rq_bytes(rq))
                return;
 
-       if (req->need_inval) {
+       if (req->mr->need_inval) {
                res = nvme_rdma_inv_rkey(queue, req);
                if (res < 0) {
                        dev_err(ctrl->ctrl.device,
@@ -935,7 +931,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
                             IB_ACCESS_REMOTE_READ |
                             IB_ACCESS_REMOTE_WRITE;
 
-       req->need_inval = true;
+       req->mr->need_inval = true;
 
        sg->addr = cpu_to_le64(req->mr->iova);
        put_unaligned_le24(req->mr->length, sg->length);
@@ -958,7 +954,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
        req->num_sge = 1;
        req->inline_data = false;
-       req->need_inval = false;
+       req->mr->need_inval = false;
 
        c->common.flags |= NVME_CMD_SGL_METABUF;
 
@@ -1145,7 +1141,7 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 
        if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
            wc->ex.invalidate_rkey == req->mr->rkey)
-               req->need_inval = false;
+               req->mr->need_inval = false;
 
        blk_mq_complete_request(rq, status);
 
@@ -1278,8 +1274,22 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 
        priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
        priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
-       priv.hrqsize = cpu_to_le16(queue->queue_size);
-       priv.hsqsize = cpu_to_le16(queue->queue_size);
+       /*
+        * set the admin queue depth to the minimum size
+        * specified by the Fabrics standard.
+        */
+       if (priv.qid == 0) {
+               priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
+               priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+       } else {
+               /*
+                * The current interpretation of the fabrics spec is
+                * that, at minimum, hrqsize is sqsize + 1, i.e. the
+                * 1's-based representation of sqsize.
+                */
+               priv.hrqsize = cpu_to_le16(queue->queue_size);
+               priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
+       }
 
        ret = rdma_connect(queue->cm_id, &param);
        if (ret) {
@@ -1319,7 +1329,7 @@ out_destroy_queue_ib:
 static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
 {
        struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-       int ret;
+       int ret = 0;
 
        /* Own the controller deletion */
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
@@ -1461,7 +1471,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
                flush = true;
        ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
-                       req->need_inval ? &req->reg_wr.wr : NULL, flush);
+                       req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
        if (ret) {
                nvme_rdma_unmap_data(queue, rq);
                goto err;
@@ -1816,7 +1826,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
 
        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_rdma_mq_ops;
-       ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
+       ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
        ctrl->tag_set.reserved_tags = 1; /* fabric connect */
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -1914,7 +1924,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
        spin_lock_init(&ctrl->lock);
 
        ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
-       ctrl->ctrl.sqsize = opts->queue_size;
+       ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;
 
        ret = -ENOMEM;
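Together with the tag-set and connect changes above, these host-side hunks settle the queue-size accounting: opts->queue_size stays 1's-based, ctrl->ctrl.sqsize now carries the 0's-based (size minus one) value, the admin queue is pinned at the 32-entry fabrics minimum, and the target-side hunk further below rebuilds its receive queue size as hsqsize + 1. A tiny standalone arithmetic sketch of that convention (illustration only; 128 is an arbitrary example size):

#include <stdio.h>

#define NVMF_AQ_DEPTH 32	/* minimum admin queue depth required by the fabrics spec */

int main(void)
{
	int queue_size = 128;		/* user-requested I/O queue size, 1's-based */
	int sqsize = queue_size - 1;	/* 0's-based value kept in ctrl->sqsize */

	/* Admin queue (qid 0): always connected with the spec minimum. */
	printf("admin: hrqsize=%d hsqsize=%d\n", NVMF_AQ_DEPTH, NVMF_AQ_DEPTH - 1);

	/* I/O queue: hrqsize is the 1's-based size, hsqsize the 0's-based one. */
	printf("io:    hrqsize=%d hsqsize=%d\n", queue_size, sqsize);

	/* Target side: the receive queue size is rebuilt as hsqsize + 1. */
	printf("target recv_queue_size=%d\n", sqsize + 1);
	return 0;
}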
index a5c31cb..3a5b9d0 100644 (file)
@@ -15,8 +15,8 @@ config NVME_TARGET
 
 config NVME_TARGET_LOOP
        tristate "NVMe loopback device support"
-       depends on BLK_DEV_NVME
        depends on NVME_TARGET
+       select NVME_CORE
        select NVME_FABRICS
        select SG_POOL
        help
index 7affd40..395e60d 100644 (file)
@@ -556,7 +556,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 
        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_loop_mq_ops;
-       ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
+       ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
        ctrl->tag_set.reserved_tags = 1; /* fabric connect */
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -620,7 +620,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
 
        ret = -ENOMEM;
 
-       ctrl->ctrl.sqsize = opts->queue_size;
+       ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;
 
        ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
index b4d6485..1cbe6e0 100644 (file)
@@ -978,10 +978,11 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w)
                container_of(w, struct nvmet_rdma_queue, release_work);
        struct rdma_cm_id *cm_id = queue->cm_id;
        struct nvmet_rdma_device *dev = queue->dev;
+       enum nvmet_rdma_queue_state state = queue->state;
 
        nvmet_rdma_free_queue(queue);
 
-       if (queue->state != NVMET_RDMA_IN_DEVICE_REMOVAL)
+       if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
                rdma_destroy_id(cm_id);
 
        kref_put(&dev->ref, nvmet_rdma_free_dev);
@@ -1003,10 +1004,10 @@ nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
        queue->host_qid = le16_to_cpu(req->qid);
 
        /*
-        * req->hsqsize corresponds to our recv queue size
+        * req->hsqsize corresponds to our recv queue size plus 1
         * req->hrqsize corresponds to our send queue size
         */
-       queue->recv_queue_size = le16_to_cpu(req->hsqsize);
+       queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
        queue->send_queue_size = le16_to_cpu(req->hrqsize);
 
        if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)
index 37ff015..44e0ff3 100644 (file)
@@ -3327,9 +3327,9 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
        if (nhi->vendor != PCI_VENDOR_ID_INTEL
                    || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
                        nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
+                       nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI &&
                        nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
-                   || nhi->subsystem_vendor != 0x2222
-                   || nhi->subsystem_device != 0x1111)
+                   || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
                goto out;
        dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
        device_pm_wait_for_dev(&dev->dev, &nhi->dev);
@@ -3343,6 +3343,9 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
                               PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
                               quirk_apple_wait_for_thunderbolt);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+                              PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE,
+                              quirk_apple_wait_for_thunderbolt);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
                               PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
                               quirk_apple_wait_for_thunderbolt);
index c494613..f5e1008 100644 (file)
@@ -925,6 +925,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
                        if (i > 0 && spi != using_spi) {
                                pr_err("PPI/SPI IRQ type mismatch for %s!\n",
                                        dn->name);
+                               of_node_put(dn);
                                kfree(irqs);
                                return -EINVAL;
                        }
@@ -969,7 +970,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
        if (cpumask_weight(&pmu->supported_cpus) == 0) {
                int irq = platform_get_irq(pdev, 0);
 
-               if (irq_is_percpu(irq)) {
+               if (irq >= 0 && irq_is_percpu(irq)) {
                        /* If using PPIs, check the affinity of the partition */
                        int ret;
 
index 18d6626..8ffc44a 100644 (file)
@@ -367,7 +367,7 @@ static int brcm_sata_phy_init(struct phy *phy)
                rc = -ENODEV;
        };
 
-       return 0;
+       return rc;
 }
 
 static const struct phy_ops phy_ops = {
index 0a45bc6..8c7eb33 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/power_supply.h>
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
+#include <linux/usb/of.h>
 #include <linux/workqueue.h>
 
 #define REG_ISCR                       0x00
@@ -110,6 +111,7 @@ struct sun4i_usb_phy_cfg {
 struct sun4i_usb_phy_data {
        void __iomem *base;
        const struct sun4i_usb_phy_cfg *cfg;
+       enum usb_dr_mode dr_mode;
        struct mutex mutex;
        struct sun4i_usb_phy {
                struct phy *phy;
@@ -120,6 +122,7 @@ struct sun4i_usb_phy_data {
                bool regulator_on;
                int index;
        } phys[MAX_PHYS];
+       int first_phy;
        /* phy0 / otg related variables */
        struct extcon_dev *extcon;
        bool phy0_init;
@@ -285,16 +288,10 @@ static int sun4i_usb_phy_init(struct phy *_phy)
                sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_DPDM_PULLUP_EN);
                sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_ID_PULLUP_EN);
 
-               if (data->id_det_gpio) {
-                       /* OTG mode, force ISCR and cable state updates */
-                       data->id_det = -1;
-                       data->vbus_det = -1;
-                       queue_delayed_work(system_wq, &data->detect, 0);
-               } else {
-                       /* Host only mode */
-                       sun4i_usb_phy0_set_id_detect(_phy, 0);
-                       sun4i_usb_phy0_set_vbus_detect(_phy, 1);
-               }
+               /* Force ISCR and cable state updates */
+               data->id_det = -1;
+               data->vbus_det = -1;
+               queue_delayed_work(system_wq, &data->detect, 0);
        }
 
        return 0;
@@ -319,6 +316,19 @@ static int sun4i_usb_phy_exit(struct phy *_phy)
        return 0;
 }
 
+static int sun4i_usb_phy0_get_id_det(struct sun4i_usb_phy_data *data)
+{
+       switch (data->dr_mode) {
+       case USB_DR_MODE_OTG:
+               return gpiod_get_value_cansleep(data->id_det_gpio);
+       case USB_DR_MODE_HOST:
+               return 0;
+       case USB_DR_MODE_PERIPHERAL:
+       default:
+               return 1;
+       }
+}
+
 static int sun4i_usb_phy0_get_vbus_det(struct sun4i_usb_phy_data *data)
 {
        if (data->vbus_det_gpio)
@@ -432,7 +442,10 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
        struct phy *phy0 = data->phys[0].phy;
        int id_det, vbus_det, id_notify = 0, vbus_notify = 0;
 
-       id_det = gpiod_get_value_cansleep(data->id_det_gpio);
+       if (phy0 == NULL)
+               return;
+
+       id_det = sun4i_usb_phy0_get_id_det(data);
        vbus_det = sun4i_usb_phy0_get_vbus_det(data);
 
        mutex_lock(&phy0->mutex);
@@ -448,7 +461,8 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
                 * without vbus detection report vbus low for long enough for
                 * the musb-ip to end the current device session.
                 */
-               if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) {
+               if (data->dr_mode == USB_DR_MODE_OTG &&
+                   !sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) {
                        sun4i_usb_phy0_set_vbus_detect(phy0, 0);
                        msleep(200);
                        sun4i_usb_phy0_set_vbus_detect(phy0, 1);
@@ -474,7 +488,8 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
                 * without vbus detection report vbus low for long enough for
                 * the musb-ip to end the current host session.
                 */
-               if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) {
+               if (data->dr_mode == USB_DR_MODE_OTG &&
+                   !sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) {
                        mutex_lock(&phy0->mutex);
                        sun4i_usb_phy0_set_vbus_detect(phy0, 0);
                        msleep(1000);
@@ -519,7 +534,8 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev,
 {
        struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
 
-       if (args->args[0] >= data->cfg->num_phys)
+       if (args->args[0] < data->first_phy ||
+           args->args[0] >= data->cfg->num_phys)
                return ERR_PTR(-ENODEV);
 
        return data->phys[args->args[0]].phy;
@@ -593,13 +609,17 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
                        return -EPROBE_DEFER;
        }
 
-       /* vbus_det without id_det makes no sense, and is not supported */
-       if (sun4i_usb_phy0_have_vbus_det(data) && !data->id_det_gpio) {
-               dev_err(dev, "usb0_id_det missing or invalid\n");
-               return -ENODEV;
-       }
-
-       if (data->id_det_gpio) {
+       data->dr_mode = of_usb_get_dr_mode_by_phy(np, 0);
+       switch (data->dr_mode) {
+       case USB_DR_MODE_OTG:
+               /* otg without id_det makes no sense, and is not supported */
+               if (!data->id_det_gpio) {
+                       dev_err(dev, "usb0_id_det missing or invalid\n");
+                       return -ENODEV;
+               }
+               /* fall through */
+       case USB_DR_MODE_HOST:
+       case USB_DR_MODE_PERIPHERAL:
                data->extcon = devm_extcon_dev_allocate(dev,
                                                        sun4i_usb_phy0_cable);
                if (IS_ERR(data->extcon))
@@ -610,9 +630,13 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
                        dev_err(dev, "failed to register extcon: %d\n", ret);
                        return ret;
                }
+               break;
+       default:
+               dev_info(dev, "dr_mode unknown, not registering usb phy0\n");
+               data->first_phy = 1;
        }
 
-       for (i = 0; i < data->cfg->num_phys; i++) {
+       for (i = data->first_phy; i < data->cfg->num_phys; i++) {
                struct sun4i_usb_phy *phy = data->phys + i;
                char name[16];
 
index ac4f31a..28fce4b 100644 (file)
@@ -141,9 +141,9 @@ static int sun9i_usb_phy_probe(struct platform_device *pdev)
                }
 
                phy->hsic_clk = devm_clk_get(dev, "hsic_12M");
-               if (IS_ERR(phy->clk)) {
+               if (IS_ERR(phy->hsic_clk)) {
                        dev_err(dev, "failed to get hsic_12M clock\n");
-                       return PTR_ERR(phy->clk);
+                       return PTR_ERR(phy->hsic_clk);
                }
 
                phy->reset = devm_reset_control_get(dev, "hsic");
index 5749a4e..0fe8fad 100644 (file)
@@ -1539,12 +1539,11 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
                offset += range->npins;
        }
 
-       /* Mask and clear all interrupts */
-       chv_writel(0, pctrl->regs + CHV_INTMASK);
+       /* Clear all interrupts */
        chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
 
        ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
-                                  handle_simple_irq, IRQ_TYPE_NONE);
+                                  handle_bad_irq, IRQ_TYPE_NONE);
        if (ret) {
                dev_err(pctrl->dev, "failed to add IRQ chip\n");
                goto fail;
index 7bad200..55375b1 100644 (file)
@@ -809,17 +809,17 @@ static const struct pistachio_pin_group pistachio_groups[] = {
                           PADS_FUNCTION_SELECT2, 12, 0x3),
        MFIO_MUX_PIN_GROUP(83, MIPS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
                           PADS_FUNCTION_SELECT2, 14, 0x3),
-       MFIO_MUX_PIN_GROUP(84, SYS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
+       MFIO_MUX_PIN_GROUP(84, AUDIO_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
                           PADS_FUNCTION_SELECT2, 16, 0x3),
-       MFIO_MUX_PIN_GROUP(85, WIFI_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
+       MFIO_MUX_PIN_GROUP(85, RPU_V_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
                           PADS_FUNCTION_SELECT2, 18, 0x3),
-       MFIO_MUX_PIN_GROUP(86, BT_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
+       MFIO_MUX_PIN_GROUP(86, RPU_L_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
                           PADS_FUNCTION_SELECT2, 20, 0x3),
-       MFIO_MUX_PIN_GROUP(87, RPU_V_PLL_LOCK, DREQ2, SOCIF_DEBUG,
+       MFIO_MUX_PIN_GROUP(87, SYS_PLL_LOCK, DREQ2, SOCIF_DEBUG,
                           PADS_FUNCTION_SELECT2, 22, 0x3),
-       MFIO_MUX_PIN_GROUP(88, RPU_L_PLL_LOCK, DREQ3, SOCIF_DEBUG,
+       MFIO_MUX_PIN_GROUP(88, WIFI_PLL_LOCK, DREQ3, SOCIF_DEBUG,
                           PADS_FUNCTION_SELECT2, 24, 0x3),
-       MFIO_MUX_PIN_GROUP(89, AUDIO_PLL_LOCK, DREQ4, DREQ5,
+       MFIO_MUX_PIN_GROUP(89, BT_PLL_LOCK, DREQ4, DREQ5,
                           PADS_FUNCTION_SELECT2, 26, 0x3),
        PIN_GROUP(TCK, "tck"),
        PIN_GROUP(TRSTN, "trstn"),
index ce483b0..f9d661e 100644 (file)
@@ -485,12 +485,12 @@ static const struct sunxi_desc_pin sun8i_a23_pins[] = {
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "uart2"),         /* RTS */
+                 SUNXI_FUNCTION(0x2, "uart1"),         /* RTS */
                  SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 8)),  /* PG_EINT8 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "uart2"),         /* CTS */
+                 SUNXI_FUNCTION(0x2, "uart1"),         /* CTS */
                  SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 9)),  /* PG_EINT9 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
index 3040abe..3131cac 100644 (file)
@@ -407,12 +407,12 @@ static const struct sunxi_desc_pin sun8i_a33_pins[] = {
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "uart2"),         /* RTS */
+                 SUNXI_FUNCTION(0x2, "uart1"),         /* RTS */
                  SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 8)),  /* PG_EINT8 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x2, "uart2"),         /* CTS */
+                 SUNXI_FUNCTION(0x2, "uart1"),         /* CTS */
                  SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 9)),  /* PG_EINT9 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
index 32f0f01..9d19b9a 100644 (file)
@@ -1161,7 +1161,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
                } else if (ibw_start < (ib_win->rstart + ib_win->size) &&
                           (ibw_start + ibw_size) > ib_win->rstart) {
                        /* Return error if address translation involved */
-                       if (direct && ib_win->xlat) {
+                       if (!direct || ib_win->xlat) {
                                ret = -EFAULT;
                                break;
                        }
index b2daa66..c9ff261 100644 (file)
@@ -2,7 +2,7 @@
  * max14577.c - Regulator driver for the Maxim 14577/77836
  *
  * Copyright (C) 2013,2014 Samsung Electronics
- * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -331,7 +331,7 @@ static void __exit max14577_regulator_exit(void)
 }
 module_exit(max14577_regulator_exit);
 
-MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
 MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:max14577-regulator");
index de730fd..cfbb951 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2013-2015 Samsung Electronics
  * Jonghwa Lee <jonghwa3.lee@samsung.com>
- * Krzysztof Kozlowski <k.kozlowski.k@gmail.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -314,5 +314,5 @@ module_exit(max77693_pmic_cleanup);
 
 MODULE_DESCRIPTION("MAXIM 77693/77843 regulator driver");
 MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>");
-MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski.k@gmail.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
 MODULE_LICENSE("GPL");
index 5022fa8..8ed46a9 100644 (file)
@@ -178,20 +178,21 @@ static const struct regulator_desc pma8084_hfsmps = {
 static const struct regulator_desc pma8084_ftsmps = {
        .linear_ranges = (struct regulator_linear_range[]) {
                REGULATOR_LINEAR_RANGE(350000,  0, 184, 5000),
-               REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000),
+               REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
        },
        .n_linear_ranges = 2,
-       .n_voltages = 340,
+       .n_voltages = 262,
        .ops = &rpm_smps_ldo_ops,
 };
 
 static const struct regulator_desc pma8084_pldo = {
        .linear_ranges = (struct regulator_linear_range[]) {
-               REGULATOR_LINEAR_RANGE(750000,  0,  30, 25000),
-               REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000),
+               REGULATOR_LINEAR_RANGE( 750000,  0,  63, 12500),
+               REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
+               REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
        },
-       .n_linear_ranges = 2,
-       .n_voltages = 100,
+       .n_linear_ranges = 3,
+       .n_voltages = 164,
        .ops = &rpm_smps_ldo_ops,
 };
 
@@ -221,29 +222,30 @@ static const struct regulator_desc pm8x41_hfsmps = {
 static const struct regulator_desc pm8841_ftsmps = {
        .linear_ranges = (struct regulator_linear_range[]) {
                REGULATOR_LINEAR_RANGE(350000,  0, 184, 5000),
-               REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000),
+               REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
        },
        .n_linear_ranges = 2,
-       .n_voltages = 340,
+       .n_voltages = 262,
        .ops = &rpm_smps_ldo_ops,
 };
 
 static const struct regulator_desc pm8941_boost = {
        .linear_ranges = (struct regulator_linear_range[]) {
-               REGULATOR_LINEAR_RANGE(4000000, 0, 15, 100000),
+               REGULATOR_LINEAR_RANGE(4000000, 0, 30, 50000),
        },
        .n_linear_ranges = 1,
-       .n_voltages = 16,
+       .n_voltages = 31,
        .ops = &rpm_smps_ldo_ops,
 };
 
 static const struct regulator_desc pm8941_pldo = {
        .linear_ranges = (struct regulator_linear_range[]) {
-               REGULATOR_LINEAR_RANGE( 750000,  0,  30, 25000),
-               REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000),
+               REGULATOR_LINEAR_RANGE( 750000,  0,  63, 12500),
+               REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
+               REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
        },
-       .n_linear_ranges = 2,
-       .n_voltages = 100,
+       .n_linear_ranges = 3,
+       .n_voltages = 164,
        .ops = &rpm_smps_ldo_ops,
 };
 
index 83458f7..6dc96c8 100644 (file)
@@ -361,8 +361,9 @@ static const char * const snstext[] = {
 
 /* Get sense key string or NULL if not available */
 const char *
-scsi_sense_key_string(unsigned char key) {
-       if (key <= 0xE)
+scsi_sense_key_string(unsigned char key)
+{
+       if (key < ARRAY_SIZE(snstext))
                return snstext[key];
        return NULL;
 }
index eaccd65..2464569 100644 (file)
@@ -246,6 +246,10 @@ static struct {
        {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"STK", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+       {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
        {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
        {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
        {"SONY", "TSL", NULL, BLIST_FORCELUN},          /* DDS3 & DDS4 autoloaders */
index 3f0ff07..60b651b 100644 (file)
@@ -340,22 +340,6 @@ static int do_sas_phy_delete(struct device *dev, void *data)
        return 0;
 }
 
-/**
- * is_sas_attached - check if device is SAS attached
- * @sdev: scsi device to check
- *
- * returns true if the device is SAS attached
- */
-int is_sas_attached(struct scsi_device *sdev)
-{
-       struct Scsi_Host *shost = sdev->host;
-
-       return shost->transportt->host_attrs.ac.class ==
-               &sas_host_class.class;
-}
-EXPORT_SYMBOL(is_sas_attached);
-
-
 /**
  * sas_remove_children  -  tear down a devices SAS data structures
  * @dev:       device belonging to the sas object
index 0e8601a..8c9a35c 100644 (file)
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
 
        ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
 
-       if (is_sas_attached(sdev))
+       if (scsi_is_sas_rphy(&sdev->sdev_gendev))
                efd.addr = sas_get_address(sdev);
 
        if (efd.addr) {
index e3da1a2..2a9da2e 100644 (file)
@@ -962,7 +962,7 @@ static void wd719x_pci_remove(struct pci_dev *pdev)
        scsi_host_put(sh);
 }
 
-static DEFINE_PCI_DEVICE_TABLE(wd719x_pci_table) = {
+static const struct pci_device_id wd719x_pci_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) },
        {}
 };
index 823cbc9..7a37090 100644 (file)
@@ -720,8 +720,6 @@ static int img_spfi_remove(struct platform_device *pdev)
                clk_disable_unprepare(spfi->sys_clk);
        }
 
-       spi_master_put(master);
-
        return 0;
 }
 
index 0be89e0..899d7a8 100644 (file)
@@ -685,7 +685,6 @@ static int mtk_spi_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
 
        mtk_spi_reset(mdata);
-       spi_master_put(master);
 
        return 0;
 }
index f3df522..58d2d48 100644 (file)
@@ -214,6 +214,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
                return PTR_ERR(ssp->clk);
 
        memset(&pi, 0, sizeof(pi));
+       pi.fwnode = dev->dev.fwnode;
        pi.parent = &dev->dev;
        pi.name = "pxa2xx-spi";
        pi.id = ssp->port_id;
index c338ef1..7f15556 100644 (file)
@@ -1030,7 +1030,6 @@ static int spi_qup_remove(struct platform_device *pdev)
 
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
-       spi_master_put(master);
 
        return 0;
 }
index 0f83ad1..1de3a77 100644 (file)
@@ -262,6 +262,9 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
 
        for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
                brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
+               /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
+               if (sh_msiof_spi_div_table[k].div == 1 && brps > 2)
+                       continue;
                if (brps <= 32) /* max of brdv is 32 */
                        break;
        }
index 51ad42f..200ca22 100644 (file)
@@ -960,7 +960,7 @@ static int spi_transfer_one_message(struct spi_master *master,
        struct spi_transfer *xfer;
        bool keep_cs = false;
        int ret = 0;
-       unsigned long ms = 1;
+       unsigned long long ms = 1;
        struct spi_statistics *statm = &master->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;
 
@@ -991,9 +991,13 @@ static int spi_transfer_one_message(struct spi_master *master,
 
                        if (ret > 0) {
                                ret = 0;
-                               ms = xfer->len * 8 * 1000 / xfer->speed_hz;
+                               ms = 8LL * 1000LL * xfer->len;
+                               do_div(ms, xfer->speed_hz);
                                ms += ms + 100; /* some tolerance */
 
+                               if (ms > UINT_MAX)
+                                       ms = UINT_MAX;
+
                                ms = wait_for_completion_timeout(&master->xfer_completion,
                                                                 msecs_to_jiffies(ms));
                        }
@@ -1159,6 +1163,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
                if (ret < 0) {
                        dev_err(&master->dev, "Failed to power device: %d\n",
                                ret);
+                       mutex_unlock(&master->io_mutex);
                        return;
                }
        }
@@ -1174,6 +1179,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 
                        if (master->auto_runtime_pm)
                                pm_runtime_put(master->dev.parent);
+                       mutex_unlock(&master->io_mutex);
                        return;
                }
        }
index d7dd1e5..9f525ff 100644 (file)
@@ -196,6 +196,7 @@ static int pci1760_pwm_ns_to_div(unsigned int flags, unsigned int ns)
                break;
        case CMDF_ROUND_DOWN:
                divisor = ns / PCI1760_PWM_TIMEBASE;
+               break;
        default:
                return -EINVAL;
        }
index 4ab1866..ec5b9a2 100644 (file)
 
 #define N_CHANS 8
 
-enum waveform_state_bits {
-       WAVEFORM_AI_RUNNING,
-       WAVEFORM_AO_RUNNING
-};
-
 /* Data unique to this driver */
 struct waveform_private {
        struct timer_list ai_timer;     /* timer for AI commands */
@@ -68,7 +63,6 @@ struct waveform_private {
        unsigned int wf_amplitude;      /* waveform amplitude in microvolts */
        unsigned int wf_period;         /* waveform period in microseconds */
        unsigned int wf_current;        /* current time in waveform period */
-       unsigned long state_bits;
        unsigned int ai_scan_period;    /* AI scan period in usec */
        unsigned int ai_convert_period; /* AI conversion period in usec */
        struct timer_list ao_timer;     /* timer for AO commands */
@@ -191,10 +185,6 @@ static void waveform_ai_timer(unsigned long arg)
        unsigned int nsamples;
        unsigned int time_increment;
 
-       /* check command is still active */
-       if (!test_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits))
-               return;
-
        now = ktime_to_us(ktime_get());
        nsamples = comedi_nsamples_left(s, UINT_MAX);
 
@@ -386,11 +376,6 @@ static int waveform_ai_cmd(struct comedi_device *dev,
         */
        devpriv->ai_timer.expires =
                jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
-
-       /* mark command as active */
-       smp_mb__before_atomic();
-       set_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
-       smp_mb__after_atomic();
        add_timer(&devpriv->ai_timer);
        return 0;
 }
@@ -400,11 +385,12 @@ static int waveform_ai_cancel(struct comedi_device *dev,
 {
        struct waveform_private *devpriv = dev->private;
 
-       /* mark command as no longer active */
-       clear_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
-       smp_mb__after_atomic();
-       /* cannot call del_timer_sync() as may be called from timer routine */
-       del_timer(&devpriv->ai_timer);
+       if (in_softirq()) {
+               /* Assume we were called from the timer routine itself. */
+               del_timer(&devpriv->ai_timer);
+       } else {
+               del_timer_sync(&devpriv->ai_timer);
+       }
        return 0;
 }
 
@@ -436,10 +422,6 @@ static void waveform_ao_timer(unsigned long arg)
        u64 scans_since;
        unsigned int scans_avail = 0;
 
-       /* check command is still active */
-       if (!test_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits))
-               return;
-
        /* determine number of scan periods since last time */
        now = ktime_to_us(ktime_get());
        scans_since = now - devpriv->ao_last_scan_time;
@@ -518,11 +500,6 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev,
        devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
        devpriv->ao_timer.expires =
                jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
-
-       /* mark command as active */
-       smp_mb__before_atomic();
-       set_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits);
-       smp_mb__after_atomic();
        add_timer(&devpriv->ao_timer);
 
        return 1;
@@ -608,11 +585,12 @@ static int waveform_ao_cancel(struct comedi_device *dev,
        struct waveform_private *devpriv = dev->private;
 
        s->async->inttrig = NULL;
-       /* mark command as no longer active */
-       clear_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits);
-       smp_mb__after_atomic();
-       /* cannot call del_timer_sync() as may be called from timer routine */
-       del_timer(&devpriv->ao_timer);
+       if (in_softirq()) {
+               /* Assume we were called from the timer routine itself. */
+               del_timer(&devpriv->ao_timer);
+       } else {
+               del_timer_sync(&devpriv->ao_timer);
+       }
        return 0;
 }
 
index 65daef0..0f4eb95 100644 (file)
@@ -634,7 +634,7 @@ static const void *daqboard2000_find_boardinfo(struct comedi_device *dev,
        const struct daq200_boardtype *board;
        int i;
 
-       if (pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH)
+       if (pcidev->subsystem_vendor != PCI_VENDOR_ID_IOTECH)
                return NULL;
 
        for (i = 0; i < ARRAY_SIZE(boardtypes); i++) {
index 904f637..8bbd938 100644 (file)
@@ -588,8 +588,8 @@ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
        s = &dev->subdevices[0];
        s->type         = COMEDI_SUBD_AI;
        s->subdev_flags = SDF_READABLE |
-                         (it->options[2] == 1) ? SDF_DIFF :
-                         (it->options[2] == 2) ? SDF_COMMON : SDF_GROUND;
+                         ((it->options[2] == 1) ? SDF_DIFF :
+                          (it->options[2] == 2) ? SDF_COMMON : SDF_GROUND);
        s->n_chan       = (it->options[2] == 1) ? 8 : 16;
        s->maxdata      = 0x0fff;
        s->range_table  = board->is_pgh ? &dt2811_pgh_ai_ranges
index 8dabb19..0f97d7b 100644 (file)
@@ -2772,7 +2772,15 @@ static int ni_ao_inttrig(struct comedi_device *dev,
        int i;
        static const int timeout = 1000;
 
-       if (trig_num != cmd->start_arg)
+       /*
+        * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT.
+        * For backwards compatibility, also allow trig_num == 0 when
+        * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
+        * in that case, the internal trigger is being used as a pre-trigger
+        * before the external trigger.
+        */
+       if (!(trig_num == cmd->start_arg ||
+             (trig_num == 0 && cmd->start_src != TRIG_INT)))
                return -EINVAL;
 
        /*
@@ -5480,7 +5488,7 @@ static int ni_E_init(struct comedi_device *dev,
                s->maxdata      = (devpriv->is_m_series) ? 0xffffffff
                                                         : 0x00ffffff;
                s->insn_read    = ni_tio_insn_read;
-               s->insn_write   = ni_tio_insn_read;
+               s->insn_write   = ni_tio_insn_write;
                s->insn_config  = ni_tio_insn_config;
 #ifdef PCIDMA
                if (dev->irq && devpriv->mite) {
index 170ac98..24c348d 100644 (file)
@@ -419,6 +419,7 @@ static ssize_t ad5933_store(struct device *dev,
        mutex_lock(&indio_dev->mlock);
        switch ((u32)this_attr->address) {
        case AD5933_OUT_RANGE:
+               ret = -EINVAL;
                for (i = 0; i < 4; i++)
                        if (val == st->range_avail[i]) {
                                st->ctrl_hb &= ~AD5933_CTRL_RANGE(0x3);
@@ -426,7 +427,6 @@ static ssize_t ad5933_store(struct device *dev,
                                ret = ad5933_cmd(st, 0);
                                break;
                        }
-               ret = -EINVAL;
                break;
        case AD5933_IN_PGA_GAIN:
                if (sysfs_streq(buf, "1")) {
index 3664bfd..2c4dc69 100644 (file)
@@ -388,6 +388,7 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
        struct inode *inode = NULL;
        __u64 bits = 0;
        int rc = 0;
+       struct dentry *alias;
 
        /* NB 1 request reference will be taken away by ll_intent_lock()
         * when I return
@@ -412,26 +413,12 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
                 */
        }
 
-       /* Only hash *de if it is unhashed (new dentry).
-        * Atoimc_open may passing hashed dentries for open.
-        */
-       if (d_unhashed(*de)) {
-               struct dentry *alias;
-
-               alias = ll_splice_alias(inode, *de);
-               if (IS_ERR(alias)) {
-                       rc = PTR_ERR(alias);
-                       goto out;
-               }
-               *de = alias;
-       } else if (!it_disposition(it, DISP_LOOKUP_NEG)  &&
-                  !it_disposition(it, DISP_OPEN_CREATE)) {
-               /* With DISP_OPEN_CREATE dentry will be
-                * instantiated in ll_create_it.
-                */
-               LASSERT(!d_inode(*de));
-               d_instantiate(*de, inode);
+       alias = ll_splice_alias(inode, *de);
+       if (IS_ERR(alias)) {
+               rc = PTR_ERR(alias);
+               goto out;
        }
+       *de = alias;
 
        if (!it_disposition(it, DISP_LOOKUP_NEG)) {
                /* we have lookup look - unhide dentry */
@@ -587,6 +574,24 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
               dentry, PFID(ll_inode2fid(dir)), dir, file, open_flags, mode,
               *opened);
 
+       /* Only negative dentries enter here */
+       LASSERT(!d_inode(dentry));
+
+       if (!d_in_lookup(dentry)) {
+               /* A valid negative dentry that just passed revalidation,
+                * there's little point to try and open it server-side,
+                * even though there's a minuscule chance it might succeed.
+                * Either way it's a valid race to just return -ENOENT here.
+                */
+               if (!(open_flags & O_CREAT))
+                       return -ENOENT;
+
+               /* Otherwise we just unhash it to be rehashed afresh via
+                * lookup if necessary
+                */
+               d_drop(dentry);
+       }
+
        it = kzalloc(sizeof(*it), GFP_NOFS);
        if (!it)
                return -ENOMEM;
index 0b1760c..78f524f 100644 (file)
@@ -3363,7 +3363,7 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
                if (!hif_workqueue) {
                        netdev_err(vif->ndev, "Failed to create workqueue\n");
                        result = -ENOMEM;
-                       goto _fail_mq_;
+                       goto _fail_;
                }
 
                setup_timer(&periodic_rssi, GetPeriodicRSSI,
@@ -3391,7 +3391,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
 
        clients_count++;
 
-_fail_mq_:
        destroy_workqueue(hif_workqueue);
 _fail_:
        return result;
index 3a66255..3221511 100644 (file)
@@ -648,7 +648,7 @@ void wilc1000_wlan_deinit(struct net_device *dev)
                        mutex_unlock(&wl->hif_cs);
                }
                if (&wl->txq_event)
-                       wait_for_completion(&wl->txq_event);
+                       complete(&wl->txq_event);
 
                wlan_deinitialize_threads(dev);
                deinit_irq(dev);
index 9092600..2c2e8ac 100644 (file)
@@ -1191,7 +1191,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
        struct wilc_priv *priv;
        struct wilc_vif *vif;
        u32 i = 0;
-       u32 associatedsta = 0;
+       u32 associatedsta = ~0;
        u32 inactive_time = 0;
        priv = wiphy_priv(wiphy);
        vif = netdev_priv(dev);
@@ -1204,7 +1204,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
                        }
                }
 
-               if (associatedsta == -1) {
+               if (associatedsta == ~0) {
                        netdev_err(dev, "sta required is not associated\n");
                        return -ENOENT;
                }
index 0ae0b13..2fb1bf1 100644 (file)
@@ -24,6 +24,7 @@
 #include <net/ip6_route.h>
 #include <net/addrconf.h>
 
+#include <libcxgb_cm.h>
 #include "cxgbit.h"
 #include "clip_tbl.h"
 
@@ -72,15 +73,6 @@ out:
        return wr_waitp->ret;
 }
 
-/* Returns whether a CPL status conveys negative advice.
- */
-static int cxgbit_is_neg_adv(unsigned int status)
-{
-       return status == CPL_ERR_RTX_NEG_ADVICE ||
-               status == CPL_ERR_PERSIST_NEG_ADVICE ||
-               status == CPL_ERR_KEEPALV_NEG_ADVICE;
-}
-
 static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
 {
        return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
@@ -623,21 +615,14 @@ void cxgbit_free_np(struct iscsi_np *np)
 static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
 {
        struct sk_buff *skb;
-       struct cpl_close_con_req *req;
-       unsigned int len = roundup(sizeof(struct cpl_close_con_req), 16);
+       u32 len = roundup(sizeof(struct cpl_close_con_req), 16);
 
        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;
 
-       req = (struct cpl_close_con_req *)__skb_put(skb, len);
-       memset(req, 0, len);
-
-       set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
-       INIT_TP_WR(req, csk->tid);
-       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
-                                                   csk->tid));
-       req->rsvd = 0;
+       cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
+                             NULL, NULL);
 
        cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
        __skb_queue_tail(&csk->txq, skb);
@@ -662,9 +647,8 @@ static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
 
 static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
 {
-       struct cpl_abort_req *req;
-       unsigned int len = roundup(sizeof(*req), 16);
        struct sk_buff *skb;
+       u32 len = roundup(sizeof(struct cpl_abort_req), 16);
 
        pr_debug("%s: csk %p tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);
@@ -675,15 +659,9 @@ static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
                cxgbit_send_tx_flowc_wr(csk);
 
        skb = __skb_dequeue(&csk->skbq);
-       req = (struct cpl_abort_req *)__skb_put(skb, len);
-       memset(req, 0, len);
+       cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
+                         csk->com.cdev, cxgbit_abort_arp_failure);
 
-       set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
-       t4_set_arp_err_handler(skb, csk->com.cdev, cxgbit_abort_arp_failure);
-       INIT_TP_WR(req, csk->tid);
-       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ,
-                                                   csk->tid));
-       req->cmd = CPL_ABORT_SEND_RST;
        return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
 }
 
@@ -789,109 +767,6 @@ void _cxgbit_free_csk(struct kref *kref)
        kfree(csk);
 }
 
-static void
-cxgbit_get_tuple_info(struct cpl_pass_accept_req *req, int *iptype,
-                     __u8 *local_ip, __u8 *peer_ip, __be16 *local_port,
-                     __be16 *peer_port)
-{
-       u32 eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
-       u32 ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
-       struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
-       struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
-       struct tcphdr *tcp = (struct tcphdr *)
-                             ((u8 *)(req + 1) + eth_len + ip_len);
-
-       if (ip->version == 4) {
-               pr_debug("%s saddr 0x%x daddr 0x%x sport %u dport %u\n",
-                        __func__,
-                        ntohl(ip->saddr), ntohl(ip->daddr),
-                        ntohs(tcp->source),
-                        ntohs(tcp->dest));
-               *iptype = 4;
-               memcpy(peer_ip, &ip->saddr, 4);
-               memcpy(local_ip, &ip->daddr, 4);
-       } else {
-               pr_debug("%s saddr %pI6 daddr %pI6 sport %u dport %u\n",
-                        __func__,
-                        ip6->saddr.s6_addr, ip6->daddr.s6_addr,
-                        ntohs(tcp->source),
-                        ntohs(tcp->dest));
-               *iptype = 6;
-               memcpy(peer_ip, ip6->saddr.s6_addr, 16);
-               memcpy(local_ip, ip6->daddr.s6_addr, 16);
-       }
-
-       *peer_port = tcp->source;
-       *local_port = tcp->dest;
-}
-
-static int
-cxgbit_our_interface(struct cxgbit_device *cdev, struct net_device *egress_dev)
-{
-       u8 i;
-
-       egress_dev = cxgbit_get_real_dev(egress_dev);
-       for (i = 0; i < cdev->lldi.nports; i++)
-               if (cdev->lldi.ports[i] == egress_dev)
-                       return 1;
-       return 0;
-}
-
-static struct dst_entry *
-cxgbit_find_route6(struct cxgbit_device *cdev, __u8 *local_ip, __u8 *peer_ip,
-                  __be16 local_port, __be16 peer_port, u8 tos,
-                  __u32 sin6_scope_id)
-{
-       struct dst_entry *dst = NULL;
-
-       if (IS_ENABLED(CONFIG_IPV6)) {
-               struct flowi6 fl6;
-
-               memset(&fl6, 0, sizeof(fl6));
-               memcpy(&fl6.daddr, peer_ip, 16);
-               memcpy(&fl6.saddr, local_ip, 16);
-               if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
-                       fl6.flowi6_oif = sin6_scope_id;
-               dst = ip6_route_output(&init_net, NULL, &fl6);
-               if (!dst)
-                       goto out;
-               if (!cxgbit_our_interface(cdev, ip6_dst_idev(dst)->dev) &&
-                   !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
-                       dst_release(dst);
-                       dst = NULL;
-               }
-       }
-out:
-       return dst;
-}
-
-static struct dst_entry *
-cxgbit_find_route(struct cxgbit_device *cdev, __be32 local_ip, __be32 peer_ip,
-                 __be16 local_port, __be16 peer_port, u8 tos)
-{
-       struct rtable *rt;
-       struct flowi4 fl4;
-       struct neighbour *n;
-
-       rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip,
-                                  local_ip,
-                                  peer_port, local_port, IPPROTO_TCP,
-                                  tos, 0);
-       if (IS_ERR(rt))
-               return NULL;
-       n = dst_neigh_lookup(&rt->dst, &peer_ip);
-       if (!n)
-               return NULL;
-       if (!cxgbit_our_interface(cdev, n->dev) &&
-           !(n->dev->flags & IFF_LOOPBACK)) {
-               neigh_release(n);
-               dst_release(&rt->dst);
-               return NULL;
-       }
-       neigh_release(n);
-       return &rt->dst;
-}
-
 static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
 {
        unsigned int linkspeed;
@@ -1072,21 +947,14 @@ int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
 
 static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
 {
-       struct cpl_tid_release *req;
-       unsigned int len = roundup(sizeof(*req), 16);
+       u32 len = roundup(sizeof(struct cpl_tid_release), 16);
        struct sk_buff *skb;
 
        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;
 
-       req = (struct cpl_tid_release *)__skb_put(skb, len);
-       memset(req, 0, len);
-
-       INIT_TP_WR(req, tid);
-       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(
-                  CPL_TID_RELEASE, tid));
-       set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+       cxgb_mk_tid_release(skb, len, tid, 0);
        cxgbit_ofld_send(cdev, skb);
 }
 
@@ -1108,20 +976,6 @@ cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
        return ret < 0 ? ret : 0;
 }
 
-static void
-cxgbit_best_mtu(const unsigned short *mtus, unsigned short mtu,
-               unsigned int *idx, int use_ts, int ipv6)
-{
-       unsigned short hdr_size = (ipv6 ? sizeof(struct ipv6hdr) :
-                                  sizeof(struct iphdr)) +
-                                  sizeof(struct tcphdr) +
-                                  (use_ts ? round_up(TCPOLEN_TIMESTAMP,
-                                   4) : 0);
-       unsigned short data_size = mtu - hdr_size;
-
-       cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
-}
-
 static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
 {
        if (csk->com.state != CSK_STATE_ESTABLISHED) {
@@ -1140,22 +994,18 @@ static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
 int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
 {
        struct sk_buff *skb;
-       struct cpl_rx_data_ack *req;
-       unsigned int len = roundup(sizeof(*req), 16);
+       u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
+       u32 credit_dack;
 
        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -1;
 
-       req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
-       memset(req, 0, len);
+       credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
+                     RX_CREDITS_V(csk->rx_credits);
 
-       set_wr_txq(skb, CPL_PRIORITY_ACK, csk->ctrlq_idx);
-       INIT_TP_WR(req, csk->tid);
-       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
-                                                   csk->tid));
-       req->credit_dack = cpu_to_be32(RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
-                                      RX_CREDITS_V(csk->rx_credits));
+       cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
+                           credit_dack);
 
        csk->rx_credits = 0;
 
@@ -1210,15 +1060,6 @@ out:
        return -ENOMEM;
 }
 
-static u32 cxgbit_compute_wscale(u32 win)
-{
-       u32 wscale = 0;
-
-       while (wscale < 14 && (65535 << wscale) < win)
-               wscale++;
-       return wscale;
-}
-
 static void
 cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
 {
@@ -1246,10 +1087,10 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
        INIT_TP_WR(rpl5, csk->tid);
        OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
                                                     csk->tid));
-       cxgbit_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
-                       req->tcpopt.tstamp,
-                       (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
-       wscale = cxgbit_compute_wscale(csk->rcv_win);
+       cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
+                     req->tcpopt.tstamp,
+                     (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+       wscale = cxgb_compute_wscale(csk->rcv_win);
        /*
         * Specify the largest window that will fit in opt0. The
         * remainder will be specified in the rx_data_ack.
@@ -1340,8 +1181,8 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
                goto rel_skb;
        }
 
-       cxgbit_get_tuple_info(req, &iptype, local_ip, peer_ip,
-                             &local_port, &peer_port);
+       cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
+                       peer_ip, &local_port, &peer_port);
 
        /* Find output route */
        if (iptype == 4)  {
@@ -1350,21 +1191,23 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
                         , __func__, cnp, tid,
                         local_ip, peer_ip, ntohs(local_port),
                         ntohs(peer_port), peer_mss);
-               dst = cxgbit_find_route(cdev, *(__be32 *)local_ip,
-                                       *(__be32 *)peer_ip,
-                                       local_port, peer_port,
-                                       PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
+               dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
+                                     *(__be32 *)local_ip,
+                                     *(__be32 *)peer_ip,
+                                     local_port, peer_port,
+                                     PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
        } else {
                pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
                         "lport %d rport %d peer_mss %d\n"
                         , __func__, cnp, tid,
                         local_ip, peer_ip, ntohs(local_port),
                         ntohs(peer_port), peer_mss);
-               dst = cxgbit_find_route6(cdev, local_ip, peer_ip,
-                                        local_port, peer_port,
-                                        PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
-                                        ((struct sockaddr_in6 *)
-                                        &cnp->com.local_addr)->sin6_scope_id);
+               dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
+                                      local_ip, peer_ip,
+                                      local_port, peer_port,
+                                      PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
+                                      ((struct sockaddr_in6 *)
+                                       &cnp->com.local_addr)->sin6_scope_id);
        }
        if (!dst) {
                pr_err("%s - failed to find dst entry!\n",
@@ -1795,16 +1638,15 @@ static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
 {
        struct cpl_abort_req_rss *hdr = cplhdr(skb);
        unsigned int tid = GET_TID(hdr);
-       struct cpl_abort_rpl *rpl;
        struct sk_buff *rpl_skb;
        bool release = false;
        bool wakeup_thread = false;
-       unsigned int len = roundup(sizeof(*rpl), 16);
+       u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
 
        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, tid, csk->com.state);
 
-       if (cxgbit_is_neg_adv(hdr->status)) {
+       if (cxgb_is_neg_adv(hdr->status)) {
                pr_err("%s: got neg advise %d on tid %u\n",
                       __func__, hdr->status, tid);
                goto rel_skb;
@@ -1839,14 +1681,8 @@ static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
                cxgbit_send_tx_flowc_wr(csk);
 
        rpl_skb = __skb_dequeue(&csk->skbq);
-       set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
-
-       rpl = (struct cpl_abort_rpl *)__skb_put(rpl_skb, len);
-       memset(rpl, 0, len);
 
-       INIT_TP_WR(rpl, csk->tid);
-       OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
-       rpl->cmd = CPL_ABORT_NO_RST;
+       cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
        cxgbit_ofld_send(csk->com.cdev, rpl_skb);
 
        if (wakeup_thread) {
index 71a3392..5f81792 100644 (file)
@@ -504,6 +504,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
                if (IS_ERR(priv->zone)) {
                        dev_err(dev, "can't register thermal zone\n");
                        ret = PTR_ERR(priv->zone);
+                       priv->zone = NULL;
                        goto error_unregister;
                }
 
index 9c15344..a8c2041 100644 (file)
@@ -648,6 +648,12 @@ static struct pci_device_id nhi_ids[] = {
                .device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
                .subvendor = 0x2222, .subdevice = 0x1111,
        },
+       {
+               .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+               .vendor = PCI_VENDOR_ID_INTEL,
+               .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
+               .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
+       },
        {
                .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
                .vendor = PCI_VENDOR_ID_INTEL,
index 1e116f5..9840fde 100644 (file)
@@ -372,7 +372,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
 
        if (sw->config.device_id != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
            sw->config.device_id != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
-           sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE)
+           sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE &&
+           sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE &&
+           sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE)
                tb_sw_warn(sw, "unsupported switch device id %#x\n",
                           sw->config.device_id);
 
index 122e0e4..1a16fea 100644 (file)
@@ -15,8 +15,6 @@
 #include <linux/serial_reg.h>
 #include <linux/dmaengine.h>
 
-#include "../serial_mctrl_gpio.h"
-
 struct uart_8250_dma {
        int (*tx_dma)(struct uart_8250_port *p);
        int (*rx_dma)(struct uart_8250_port *p);
@@ -133,43 +131,12 @@ void serial8250_em485_destroy(struct uart_8250_port *p);
 
 static inline void serial8250_out_MCR(struct uart_8250_port *up, int value)
 {
-       int mctrl_gpio = 0;
-
        serial_out(up, UART_MCR, value);
-
-       if (value & UART_MCR_RTS)
-               mctrl_gpio |= TIOCM_RTS;
-       if (value & UART_MCR_DTR)
-               mctrl_gpio |= TIOCM_DTR;
-
-       mctrl_gpio_set(up->gpios, mctrl_gpio);
 }
 
 static inline int serial8250_in_MCR(struct uart_8250_port *up)
 {
-       int mctrl, mctrl_gpio = 0;
-
-       mctrl = serial_in(up, UART_MCR);
-
-       /* save current MCR values */
-       if (mctrl & UART_MCR_RTS)
-               mctrl_gpio |= TIOCM_RTS;
-       if (mctrl & UART_MCR_DTR)
-               mctrl_gpio |= TIOCM_DTR;
-
-       mctrl_gpio = mctrl_gpio_get_outputs(up->gpios, &mctrl_gpio);
-
-       if (mctrl_gpio & TIOCM_RTS)
-               mctrl |= UART_MCR_RTS;
-       else
-               mctrl &= ~UART_MCR_RTS;
-
-       if (mctrl_gpio & TIOCM_DTR)
-               mctrl |= UART_MCR_DTR;
-       else
-               mctrl &= ~UART_MCR_DTR;
-
-       return mctrl;
+       return serial_in(up, UART_MCR);
 }
 
 #if defined(__alpha__) && !defined(CONFIG_PCI)
index 13ad5c3..dcf43f6 100644 (file)
@@ -974,8 +974,6 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
 
        uart = serial8250_find_match_or_unused(&up->port);
        if (uart && uart->port.type != PORT_8250_CIR) {
-               struct mctrl_gpios *gpios;
-
                if (uart->port.dev)
                        uart_remove_one_port(&serial8250_reg, &uart->port);
 
@@ -1013,13 +1011,6 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
                if (up->port.flags & UPF_FIXED_TYPE)
                        uart->port.type = up->port.type;
 
-               gpios = mctrl_gpio_init(&uart->port, 0);
-               if (IS_ERR(gpios)) {
-                       if (PTR_ERR(gpios) != -ENOSYS)
-                               return PTR_ERR(gpios);
-               } else
-                       uart->gpios = gpios;
-
                serial8250_set_defaults(uart);
 
                /* Possibly override default I/O functions.  */
index 737b4b3..0facc78 100644 (file)
@@ -31,7 +31,7 @@
 #define IO_ADDR2 0x60
 #define LDN 0x7
 
-#define IRQ_MODE       0x70
+#define FINTEK_IRQ_MODE        0x70
 #define IRQ_SHARE      BIT(4)
 #define IRQ_MODE_MASK  (BIT(6) | BIT(5))
 #define IRQ_LEVEL_LOW  0
@@ -195,7 +195,7 @@ static int fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool level_mode)
        outb(LDN, pdata->base_port + ADDR_PORT);
        outb(pdata->index, pdata->base_port + DATA_PORT);
 
-       outb(IRQ_MODE, pdata->base_port + ADDR_PORT);
+       outb(FINTEK_IRQ_MODE, pdata->base_port + ADDR_PORT);
        tmp = inb(pdata->base_port + DATA_PORT);
 
        tmp &= ~IRQ_MODE_MASK;
index 339de9c..20c5db2 100644 (file)
@@ -168,6 +168,9 @@ static void mid8250_set_termios(struct uart_port *p,
        unsigned long w = BIT(24) - 1;
        unsigned long mul, div;
 
+       /* Gracefully handle the B0 case: fall back to B9600 */
+       fuart = fuart ? fuart : 9600 * 16;
+
        if (mid->board->freq < fuart) {
                /* Find prescaler value that satisfies Fuart < Fref */
                if (mid->board->freq > baud)
index e14982f..61ad6c3 100644 (file)
@@ -134,21 +134,18 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
 
        serial8250_do_set_mctrl(port, mctrl);
 
-       if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(up->gpios,
-                                               UART_GPIO_RTS))) {
-               /*
-                * Turn off autoRTS if RTS is lowered and restore autoRTS
-                * setting if RTS is raised
-                */
-               lcr = serial_in(up, UART_LCR);
-               serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
-               if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
-                       priv->efr |= UART_EFR_RTS;
-               else
-                       priv->efr &= ~UART_EFR_RTS;
-               serial_out(up, UART_EFR, priv->efr);
-               serial_out(up, UART_LCR, lcr);
-       }
+       /*
+        * Turn off autoRTS if RTS is lowered and restore autoRTS setting
+        * if RTS is raised
+        */
+       lcr = serial_in(up, UART_LCR);
+       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+       if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
+               priv->efr |= UART_EFR_RTS;
+       else
+               priv->efr &= ~UART_EFR_RTS;
+       serial_out(up, UART_EFR, priv->efr);
+       serial_out(up, UART_LCR, lcr);
 }
 
 /*
@@ -449,9 +446,7 @@ static void omap_8250_set_termios(struct uart_port *port,
        priv->efr = 0;
        up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
 
-       if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW
-               && IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(up->gpios,
-                                                       UART_GPIO_RTS))) {
+       if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) {
                /* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */
                up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
                priv->efr |= UART_EFR_CTS;
index 20ebaea..bc51b32 100644 (file)
@@ -1950,6 +1950,43 @@ pci_wch_ch38x_setup(struct serial_private *priv,
 #define PCI_DEVICE_ID_PERICOM_PI7C9X7954       0x7954
 #define PCI_DEVICE_ID_PERICOM_PI7C9X7958       0x7958
 
+#define PCI_VENDOR_ID_ACCESIO                  0x494f
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB    0x1051
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S     0x1053
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB    0x105C
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S     0x105E
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB  0x1091
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2   0x1093
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB  0x1099
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4   0x109B
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB   0x10D1
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM    0x10D3
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB   0x10DA
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM    0x10DC
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1   0x1108
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2   0x1110
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2   0x1111
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4   0x1118
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4   0x1119
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S      0x1152
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S      0x115A
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2    0x1190
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2   0x1191
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4    0x1198
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4   0x1199
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM     0x11D0
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4    0x105A
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4    0x105B
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8    0x106A
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8    0x106B
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4    0x1098
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8    0x10A9
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM     0x10D9
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM     0x10E9
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM     0x11D8
+
+
+
 /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584        0x1584
 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588        0x1588
@@ -5112,6 +5149,108 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_ANY_ID, PCI_ANY_ID,
                0,
                0, pbn_pericom_PI7C9X7958 },
+       /*
+        * ACCES I/O Products quad
+        */
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7954 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
+       {       PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_pericom_PI7C9X7958 },
        /*
         * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
         */
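
All of the entries added above share one shape: positional initializers of struct pci_device_id (vendor, device, subvendor, subdevice, class, class_mask, driver_data), with PCI_ANY_ID wildcards for the subsystem IDs and the final field selecting a board description (one of the pbn_pericom_* setups). A hedged, generic sketch of that pattern — the vendor/device numbers and the board enum below are invented for illustration, not real hardware:

	#include <linux/module.h>
	#include <linux/pci.h>

	/* Hypothetical board selectors; driver_data indexes a board table. */
	enum { my_board_2port, my_board_4port };

	static const struct pci_device_id my_serial_ids[] = {
		{ 0x1234, 0x0002,		/* hypothetical vendor/device */
		  PCI_ANY_ID, PCI_ANY_ID, 0, 0, my_board_2port },
		{ 0x1234, 0x0004,
		  PCI_ANY_ID, PCI_ANY_ID, 0, 0, my_board_4port },
		{ }				/* terminating entry */
	};
	MODULE_DEVICE_TABLE(pci, my_serial_ids);
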
index 7481b95..bdfa659 100644 (file)
@@ -1618,8 +1618,6 @@ static void serial8250_disable_ms(struct uart_port *port)
        if (up->bugs & UART_BUG_NOMSR)
                return;
 
-       mctrl_gpio_disable_ms(up->gpios);
-
        up->ier &= ~UART_IER_MSI;
        serial_port_out(port, UART_IER, up->ier);
 }
@@ -1632,8 +1630,6 @@ static void serial8250_enable_ms(struct uart_port *port)
        if (up->bugs & UART_BUG_NOMSR)
                return;
 
-       mctrl_gpio_enable_ms(up->gpios);
-
        up->ier |= UART_IER_MSI;
 
        serial8250_rpm_get(up);
@@ -1917,8 +1913,7 @@ unsigned int serial8250_do_get_mctrl(struct uart_port *port)
                ret |= TIOCM_DSR;
        if (status & UART_MSR_CTS)
                ret |= TIOCM_CTS;
-
-       return mctrl_gpio_get(up->gpios, &ret);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(serial8250_do_get_mctrl);
 
index c9ec839..7c6f7af 100644 (file)
@@ -6,7 +6,6 @@
 config SERIAL_8250
        tristate "8250/16550 and compatible serial support"
        select SERIAL_CORE
-       select SERIAL_MCTRL_GPIO if GPIOLIB
        ---help---
          This selects whether you want to include the driver for the standard
          serial ports.  The standard answer is Y.  People who might say N
index 065f5d9..b933568 100644 (file)
@@ -949,6 +949,15 @@ static int isr_setup_status_phase(struct ci_hdrc *ci)
        int retval;
        struct ci_hw_ep *hwep;
 
+       /*
+        * Unexpected USB controller behavior, caused by bad signal integrity
+        * or ground reference problems, can lead to isr_setup_status_phase
+        * being called with ci->status equal to NULL.
+        * If this situation occurs, you should review your USB hardware design.
+        */
+       if (WARN_ON_ONCE(!ci->status))
+               return -EPIPE;
+
        hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
        ci->status->context = ci;
        ci->status->complete = isr_setup_status_complete;
@@ -1596,8 +1605,11 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
 {
        struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
 
-       /* Data+ pullup controlled by OTG state machine in OTG fsm mode */
-       if (ci_otg_is_fsm_mode(ci))
+       /*
+        * Data+ pullup controlled by OTG state machine in OTG fsm mode;
+        * and don't touch Data+ in host mode for dual role config.
+        */
+       if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
                return 0;
 
        pm_runtime_get_sync(&ci->gadget.dev);
index 0511631..15ce4ab 100644 (file)
@@ -187,7 +187,7 @@ static const unsigned short high_speed_maxpacket_maxes[4] = {
        [USB_ENDPOINT_XFER_CONTROL] = 64,
        [USB_ENDPOINT_XFER_ISOC] = 1024,
        [USB_ENDPOINT_XFER_BULK] = 512,
-       [USB_ENDPOINT_XFER_INT] = 1023,
+       [USB_ENDPOINT_XFER_INT] = 1024,
 };
 static const unsigned short super_speed_maxpacket_maxes[4] = {
        [USB_ENDPOINT_XFER_CONTROL] = 512,
index e6a6d67..09c8d9c 100644 (file)
@@ -1709,11 +1709,17 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        as->urb->start_frame = uurb->start_frame;
        as->urb->number_of_packets = number_of_packets;
        as->urb->stream_id = stream_id;
-       if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
-                       ps->dev->speed == USB_SPEED_HIGH)
-               as->urb->interval = 1 << min(15, ep->desc.bInterval - 1);
-       else
-               as->urb->interval = ep->desc.bInterval;
+
+       if (ep->desc.bInterval) {
+               if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
+                               ps->dev->speed == USB_SPEED_HIGH ||
+                               ps->dev->speed >= USB_SPEED_SUPER)
+                       as->urb->interval = 1 <<
+                                       min(15, ep->desc.bInterval - 1);
+               else
+                       as->urb->interval = ep->desc.bInterval;
+       }
+
        as->urb->context = as;
        as->urb->complete = async_completed;
        for (totlen = u = 0; u < number_of_packets; u++) {
index 9fae029..d645512 100644 (file)
@@ -868,6 +868,7 @@ struct dwc2_hsotg {
        void *priv;
        int     irq;
        struct clk *clk;
+       struct reset_control *reset;
 
        unsigned int queuing_high_bandwidth:1;
        unsigned int srp_success:1;
index fc6f525..530959a 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/platform_device.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_data/s3c-hsotg.h>
+#include <linux/reset.h>
 
 #include <linux/usb/of.h>
 
@@ -337,6 +338,24 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
 {
        int i, ret;
 
+       hsotg->reset = devm_reset_control_get_optional(hsotg->dev, "dwc2");
+       if (IS_ERR(hsotg->reset)) {
+               ret = PTR_ERR(hsotg->reset);
+               switch (ret) {
+               case -ENOENT:
+               case -ENOTSUPP:
+                       hsotg->reset = NULL;
+                       break;
+               default:
+                       dev_err(hsotg->dev, "error getting reset control %d\n",
+                               ret);
+                       return ret;
+               }
+       }
+
+       if (hsotg->reset)
+               reset_control_deassert(hsotg->reset);
+
        /* Set default UTMI width */
        hsotg->phyif = GUSBCFG_PHYIF16;
 
@@ -434,6 +453,9 @@ static int dwc2_driver_remove(struct platform_device *dev)
        if (hsotg->ll_hw_enabled)
                dwc2_lowlevel_hw_disable(hsotg);
 
+       if (hsotg->reset)
+               reset_control_assert(hsotg->reset);
+
        return 0;
 }
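
For context on the probe/remove changes above: devm_reset_control_get_optional() hands back an error pointer when no reset line is described for the device, and the driver treats -ENOENT/-ENOTSUPP as "nothing to manage" rather than a failure. A sketch of that same pattern in a generic probe helper; the structure, function and "core" reset name below are hypothetical, not the dwc2 code:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/reset.h>

	struct my_dev {
		struct device *dev;
		struct reset_control *reset;
	};

	static int my_dev_get_reset(struct my_dev *md)
	{
		md->reset = devm_reset_control_get_optional(md->dev, "core");
		if (IS_ERR(md->reset)) {
			int ret = PTR_ERR(md->reset);

			/* No reset specified: optional, so carry on without one. */
			if (ret == -ENOENT || ret == -ENOTSUPP) {
				md->reset = NULL;
				return 0;
			}
			dev_err(md->dev, "error getting reset control %d\n", ret);
			return ret;
		}

		/* Bring the block out of reset before touching registers. */
		reset_control_deassert(md->reset);
		return 0;
	}
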
 
index 9466431..35d0924 100644 (file)
@@ -1192,6 +1192,7 @@ static int dwc3_runtime_resume(struct device *dev)
        }
 
        pm_runtime_mark_last_busy(dev);
+       pm_runtime_put(dev);
 
        return 0;
 }
index 22dfc3d..33ab2a2 100644 (file)
@@ -192,7 +192,7 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event)
        int ret;
 
        ret = sprintf(str, "ep%d%s: ", epnum >> 1,
-                       (epnum & 1) ? "in" : "in");
+                       (epnum & 1) ? "in" : "out");
        if (ret < 0)
                return "UNKNOWN";
 
index 2eb84d6..6df0f5d 100644 (file)
@@ -243,6 +243,15 @@ static int dwc3_pci_runtime_suspend(struct device *dev)
        return -EBUSY;
 }
 
+static int dwc3_pci_runtime_resume(struct device *dev)
+{
+       struct platform_device *dwc3 = dev_get_drvdata(dev);
+
+       return pm_runtime_get(&dwc3->dev);
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
 static int dwc3_pci_pm_dummy(struct device *dev)
 {
        /*
@@ -255,11 +264,11 @@ static int dwc3_pci_pm_dummy(struct device *dev)
         */
        return 0;
 }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
 
 static struct dev_pm_ops dwc3_pci_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy)
-       SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_pm_dummy,
+       SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_runtime_resume,
                NULL)
 };
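
The change above splits the #ifdefs so the runtime-resume callback lives under CONFIG_PM while the sleep callbacks stay under CONFIG_PM_SLEEP, and wires a real resume handler into SET_RUNTIME_PM_OPS. A minimal sketch of how a driver typically declares such a dev_pm_ops table; the callback names and bodies are placeholders, not the dwc3-pci ones:

	#include <linux/device.h>
	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	#ifdef CONFIG_PM
	static int my_runtime_suspend(struct device *dev)
	{
		/* Only allow autosuspend when the hardware is actually idle. */
		return 0;
	}

	static int my_runtime_resume(struct device *dev)
	{
		return 0;
	}
	#endif /* CONFIG_PM */

	#ifdef CONFIG_PM_SLEEP
	static int my_sleep_noop(struct device *dev)
	{
		/* System sleep handled elsewhere; nothing to do here. */
		return 0;
	}
	#endif /* CONFIG_PM_SLEEP */

	static const struct dev_pm_ops my_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(my_sleep_noop, my_sleep_noop)
		SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
	};
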
 
index 1f5597e..122e64d 100644 (file)
@@ -1433,7 +1433,7 @@ static int dwc3_gadget_get_frame(struct usb_gadget *g)
 
 static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
 {
-       unsigned long           timeout;
+       int                     retries;
 
        int                     ret;
        u32                     reg;
@@ -1484,9 +1484,9 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
        }
 
        /* poll until Link State changes to ON */
-       timeout = jiffies + msecs_to_jiffies(100);
+       retries = 20000;
 
-       while (!time_after(jiffies, timeout)) {
+       while (retries--) {
                reg = dwc3_readl(dwc->regs, DWC3_DSTS);
 
                /* in HS, means ON */
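
The __dwc3_gadget_wakeup() change above trades a jiffies deadline for a fixed retry budget while polling DSTS; a plausible motivation (an assumption here, not stated in the hunk) is that the loop can run in a context where jiffies does not advance. A tiny self-contained sketch of that bounded-poll shape, with stand-in register helpers:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-ins for a device register read and the "link is on" test. */
	static unsigned int fake_status;
	static unsigned int read_link_status(void) { return fake_status; }
	static bool link_is_on(unsigned int status) { return status & 0x1; }

	static int wait_for_link_on(void)
	{
		int retries = 20000;	/* bounded budget instead of a clock deadline */

		while (retries--) {
			if (link_is_on(read_link_status()))
				return 0;
		}
		return -1;		/* gave up: report a timeout to the caller */
	}

	int main(void)
	{
		fake_status = 0x1;
		printf("wait_for_link_on() = %d\n", wait_for_link_on());
		return 0;
	}
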
index d58bfc3..007ec6e 100644 (file)
@@ -341,11 +341,15 @@ static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb)
 {
        struct sk_buff  *skb2 = NULL;
        struct usb_ep   *in = port->in_ep;
-       int             padlen = 0;
-       u16             len = skb->len;
+       int             headroom, tailroom, padlen = 0;
+       u16             len;
 
-       int headroom = skb_headroom(skb);
-       int tailroom = skb_tailroom(skb);
+       if (!skb)
+               return NULL;
+
+       len = skb->len;
+       headroom = skb_headroom(skb);
+       tailroom = skb_tailroom(skb);
 
        /* When (len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0,
         * stick two bytes of zero-length EEM packet on the end.
index c800582..16562e4 100644 (file)
@@ -374,6 +374,9 @@ static struct sk_buff *rndis_add_header(struct gether *port,
 {
        struct sk_buff *skb2;
 
+       if (!skb)
+               return NULL;
+
        skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
        rndis_add_hdr(skb2);
 
index 6ded634..e0cd1e4 100644 (file)
@@ -375,10 +375,15 @@ __acquires(&port->port_lock)
 */
 {
        struct list_head        *pool = &port->write_pool;
-       struct usb_ep           *in = port->port_usb->in;
+       struct usb_ep           *in;
        int                     status = 0;
        bool                    do_tty_wake = false;
 
+       if (!port->port_usb)
+               return status;
+
+       in = port->port_usb->in;
+
        while (!port->write_busy && !list_empty(pool)) {
                struct usb_request      *req;
                int                     len;
index 934f838..40c04bb 100644 (file)
@@ -827,7 +827,7 @@ void usb_gadget_unmap_request_by_dev(struct device *dev,
                return;
 
        if (req->num_mapped_sgs) {
-               dma_unmap_sg(dev, req->sg, req->num_mapped_sgs,
+               dma_unmap_sg(dev, req->sg, req->num_sgs,
                                is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
                req->num_mapped_sgs = 0;
index cf8819a..8bb011e 100644 (file)
@@ -1878,11 +1878,8 @@ static int qe_get_frame(struct usb_gadget *gadget)
 
        tmp = in_be16(&udc->usb_param->frame_n);
        if (tmp & 0x8000)
-               tmp = tmp & 0x07ff;
-       else
-               tmp = -EINVAL;
-
-       return (int)tmp;
+               return tmp & 0x07ff;
+       return -EINVAL;
 }
 
 static int fsl_qe_start(struct usb_gadget *gadget,
index 93a3bec..fb8fc34 100644 (file)
 
 /* DRD_CON */
 #define DRD_CON_PERI_CON       BIT(24)
+#define DRD_CON_VBOUT          BIT(0)
 
 /* USB_INT_ENA_1 and USB_INT_STA_1 */
 #define USB_INT_1_B3_PLLWKUP   BIT(31)
@@ -363,6 +364,7 @@ static void usb3_init_epc_registers(struct renesas_usb3 *usb3)
 {
        /* FIXME: How to change host / peripheral mode as well? */
        usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON);
+       usb3_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON);
 
        usb3_write(usb3, ~0, USB3_USB_INT_STA_1);
        usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
index fd9fd12..797137e 100644 (file)
@@ -850,6 +850,10 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
        spin_lock_irqsave(&xhci->lock, flags);
 
        ep->stop_cmds_pending--;
+       if (xhci->xhc_state & XHCI_STATE_REMOVING) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               return;
+       }
        if (xhci->xhc_state & XHCI_STATE_DYING) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Stop EP timer ran, but another timer marked "
@@ -903,7 +907,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
        spin_unlock_irqrestore(&xhci->lock, flags);
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Calling usb_hc_died()");
-       usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
+       usb_hc_died(xhci_to_hcd(xhci));
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "xHCI host controller is dead.");
 }
index 192248f..fe08e77 100644 (file)
@@ -290,6 +290,7 @@ int musb_hub_control(
        u32             temp;
        int             retval = 0;
        unsigned long   flags;
+       bool            start_musb = false;
 
        spin_lock_irqsave(&musb->lock, flags);
 
@@ -390,7 +391,7 @@ int musb_hub_control(
                         * logic relating to VBUS power-up.
                         */
                        if (!hcd->self.is_b_host && musb_has_gadget(musb))
-                               musb_start(musb);
+                               start_musb = true;
                        break;
                case USB_PORT_FEAT_RESET:
                        musb_port_reset(musb, true);
@@ -451,5 +452,9 @@ error:
                retval = -EPIPE;
        }
        spin_unlock_irqrestore(&musb->lock, flags);
+
+       if (start_musb)
+               musb_start(musb);
+
        return retval;
 }
index 980c9de..427efb5 100644 (file)
@@ -144,14 +144,18 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
 int usb_gen_phy_init(struct usb_phy *phy)
 {
        struct usb_phy_generic *nop = dev_get_drvdata(phy->dev);
+       int ret;
 
        if (!IS_ERR(nop->vcc)) {
                if (regulator_enable(nop->vcc))
                        dev_err(phy->dev, "Failed to enable power\n");
        }
 
-       if (!IS_ERR(nop->clk))
-               clk_prepare_enable(nop->clk);
+       if (!IS_ERR(nop->clk)) {
+               ret = clk_prepare_enable(nop->clk);
+               if (ret)
+                       return ret;
+       }
 
        nop_reset(nop);
 
index d4be5d5..28965ef 100644 (file)
@@ -282,9 +282,16 @@ static irqreturn_t usbhs_interrupt(int irq, void *data)
        if (usbhs_mod_is_host(priv))
                usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC);
 
-       usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
+       /*
+        * The driver should not clear the xxxSTS after the line of
+        * "call irq callback functions" because each "if" statement is
+        * possible to call the callback function for avoiding any side effects.
+        */
+       if (irq_state.intsts0 & BRDY)
+               usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
        usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts);
-       usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
+       if (irq_state.intsts0 & BEMP)
+               usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
 
        /*
         * call irq callback functions
index 92bc83b..c4c6474 100644 (file)
@@ -1076,7 +1076,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
 
        gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
        dev_info(dev, "%stransceiver found\n",
-                gpriv->transceiver ? "" : "no ");
+                !IS_ERR(gpriv->transceiver) ? "" : "no ");
 
        /*
         * CAUTION
index 5608af4..de9992b 100644 (file)
@@ -1252,7 +1252,7 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
 
        if (urb->transfer_buffer == NULL) {
                urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
-                                              GFP_KERNEL);
+                                              GFP_ATOMIC);
                if (!urb->transfer_buffer)
                        goto exit;
        }
index ed378fb..57426d7 100644 (file)
@@ -1340,8 +1340,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
        }
 
        if (urb->transfer_buffer == NULL) {
-               urb->transfer_buffer =
-                   kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
+               urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
+                                              GFP_ATOMIC);
                if (!urb->transfer_buffer)
                        goto exit;
        }
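
Both the mos7720 and mos7840 write paths above allocate a URB transfer buffer on a code path that is not allowed to sleep, so GFP_KERNEL becomes GFP_ATOMIC. A hedged sketch of that rule in isolation, with made-up structure and lock names:

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct my_chan {
		spinlock_t lock;
		void *buf;
	};

	/* Called with interrupts possibly disabled; must not sleep. */
	static int my_chan_prepare(struct my_chan *ch, size_t len)
	{
		unsigned long flags;
		int ret = 0;

		spin_lock_irqsave(&ch->lock, flags);
		if (!ch->buf) {
			/* GFP_ATOMIC: the allocation may not sleep under the lock. */
			ch->buf = kmalloc(len, GFP_ATOMIC);
			if (!ch->buf)
				ret = -ENOMEM;
		}
		spin_unlock_irqrestore(&ch->lock, flags);
		return ret;
	}
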
index bc47258..9894e34 100644 (file)
@@ -525,6 +525,12 @@ static void option_instat_callback(struct urb *urb);
 #define VIATELECOM_VENDOR_ID                   0x15eb
 #define VIATELECOM_PRODUCT_CDS7                        0x0001
 
+/* WeTelecom products */
+#define WETELECOM_VENDOR_ID                    0x22de
+#define WETELECOM_PRODUCT_WMD200               0x6801
+#define WETELECOM_PRODUCT_6802                 0x6802
+#define WETELECOM_PRODUCT_WMD300               0x6803
+
 struct option_blacklist_info {
        /* bitmask of interface numbers blacklisted for send_setup */
        const unsigned long sendsetup;
@@ -1991,6 +1997,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
        { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
        { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
+       { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
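
The WeTelecom entries above match on vendor/product plus an all-0xff (vendor-specific) interface class/subclass/protocol triple, which keeps the driver off any interface exposing a standard class. A minimal sketch of that table shape for a hypothetical device — the IDs below are invented for illustration:

	#include <linux/module.h>
	#include <linux/usb.h>

	#define MY_VENDOR_ID	0x1234		/* hypothetical */
	#define MY_PRODUCT_ID	0xabcd		/* hypothetical */

	static const struct usb_device_id my_ids[] = {
		/* match any interface of this device */
		{ USB_DEVICE(MY_VENDOR_ID, MY_PRODUCT_ID) },
		/* or: match only vendor-specific (0xff/0xff/0xff) interfaces */
		{ USB_DEVICE_AND_INTERFACE_INFO(MY_VENDOR_ID, MY_PRODUCT_ID,
						0xff, 0xff, 0xff) },
		{ }	/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, my_ids);
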
index e383ecd..ed9c9ee 100644 (file)
@@ -167,7 +167,7 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
  * making all of the arch DMA ops work on the vring device itself
  * is a mess.  For now, we use the parent device for DMA ops.
  */
-struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
 {
        return vq->vq.vdev->dev.parent;
 }
index 7f6aff3..e5495f3 100644 (file)
@@ -853,6 +853,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
                current->flags |= PF_RANDOMIZE;
 
        setup_new_exec(bprm);
+       install_exec_creds(bprm);
 
        /* Do this so that we can load the interpreter, if need be.  We will
           change some of these later */
@@ -1044,7 +1045,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
                goto out;
 #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
 
-       install_exec_creds(bprm);
        retval = create_elf_tables(bprm, &loc->elf_ex,
                          load_addr, interp_load_addr);
        if (retval < 0)
index eff3993..33fe035 100644 (file)
@@ -427,6 +427,7 @@ struct btrfs_space_info {
        struct list_head ro_bgs;
        struct list_head priority_tickets;
        struct list_head tickets;
+       u64 tickets_id;
 
        struct rw_semaphore groups_sem;
        /* for block groups in our same type */
index 0450dc4..38c2df8 100644 (file)
@@ -4901,11 +4901,6 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
        u64 expected;
        u64 to_reclaim = 0;
 
-       to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
-       if (can_overcommit(root, space_info, to_reclaim,
-                          BTRFS_RESERVE_FLUSH_ALL))
-               return 0;
-
        list_for_each_entry(ticket, &space_info->tickets, list)
                to_reclaim += ticket->bytes;
        list_for_each_entry(ticket, &space_info->priority_tickets, list)
@@ -4913,6 +4908,11 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
        if (to_reclaim)
                return to_reclaim;
 
+       to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
+       if (can_overcommit(root, space_info, to_reclaim,
+                          BTRFS_RESERVE_FLUSH_ALL))
+               return 0;
+
        used = space_info->bytes_used + space_info->bytes_reserved +
               space_info->bytes_pinned + space_info->bytes_readonly +
               space_info->bytes_may_use;
@@ -4966,12 +4966,12 @@ static void wake_all_tickets(struct list_head *head)
  */
 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
 {
-       struct reserve_ticket *last_ticket = NULL;
        struct btrfs_fs_info *fs_info;
        struct btrfs_space_info *space_info;
        u64 to_reclaim;
        int flush_state;
        int commit_cycles = 0;
+       u64 last_tickets_id;
 
        fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
        space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
@@ -4984,8 +4984,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
                spin_unlock(&space_info->lock);
                return;
        }
-       last_ticket = list_first_entry(&space_info->tickets,
-                                      struct reserve_ticket, list);
+       last_tickets_id = space_info->tickets_id;
        spin_unlock(&space_info->lock);
 
        flush_state = FLUSH_DELAYED_ITEMS_NR;
@@ -5005,10 +5004,10 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
                                                              space_info);
                ticket = list_first_entry(&space_info->tickets,
                                          struct reserve_ticket, list);
-               if (last_ticket == ticket) {
+               if (last_tickets_id == space_info->tickets_id) {
                        flush_state++;
                } else {
-                       last_ticket = ticket;
+                       last_tickets_id = space_info->tickets_id;
                        flush_state = FLUSH_DELAYED_ITEMS_NR;
                        if (commit_cycles)
                                commit_cycles--;
@@ -5384,6 +5383,7 @@ again:
                        list_del_init(&ticket->list);
                        num_bytes -= ticket->bytes;
                        ticket->bytes = 0;
+                       space_info->tickets_id++;
                        wake_up(&ticket->wait);
                } else {
                        ticket->bytes -= num_bytes;
@@ -5426,6 +5426,7 @@ again:
                        num_bytes -= ticket->bytes;
                        space_info->bytes_may_use += ticket->bytes;
                        ticket->bytes = 0;
+                       space_info->tickets_id++;
                        wake_up(&ticket->wait);
                } else {
                        trace_btrfs_space_reservation(fs_info, "space_info",
@@ -8216,6 +8217,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 {
        int ret;
        struct btrfs_block_group_cache *block_group;
+       struct btrfs_space_info *space_info;
 
        /*
         * Mixed block groups will exclude before processing the log so we only
@@ -8231,9 +8233,14 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
        if (!block_group)
                return -EINVAL;
 
-       ret = btrfs_add_reserved_bytes(block_group, ins->offset,
-                                      ins->offset, 0);
-       BUG_ON(ret); /* logic error */
+       space_info = block_group->space_info;
+       spin_lock(&space_info->lock);
+       spin_lock(&block_group->lock);
+       space_info->bytes_reserved += ins->offset;
+       block_group->reserved += ins->offset;
+       spin_unlock(&block_group->lock);
+       spin_unlock(&space_info->lock);
+
        ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
                                         0, owner, offset, ins, 1);
        btrfs_put_block_group(block_group);
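
One pattern worth calling out in the btrfs flusher changes above: instead of remembering a pointer to the first ticket (which may be freed and another allocated at the same address, making "same pointer" ambiguous), the code remembers the monotonically increasing tickets_id and compares it later to see whether any waiter was actually serviced. A small detached sketch of that generation-counter idea, not btrfs code:

	#include <stdbool.h>
	#include <stdio.h>

	/* A queue that bumps a generation counter whenever a waiter is serviced. */
	struct waitq {
		unsigned long long serviced_id;	/* incremented per serviced waiter */
	};

	static void service_one(struct waitq *q)
	{
		q->serviced_id++;
	}

	/* Returns true if at least one waiter was serviced since 'snapshot'. */
	static bool made_progress(const struct waitq *q, unsigned long long snapshot)
	{
		return q->serviced_id != snapshot;
	}

	int main(void)
	{
		struct waitq q = { 0 };
		unsigned long long snap = q.serviced_id;

		service_one(&q);
		printf("progress: %d\n", made_progress(&q, snap));
		return 0;
	}
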
index 8a2c2a0..c0c13dc 100644 (file)
@@ -4200,9 +4200,11 @@ restart:
                err = PTR_ERR(trans);
                goto out_free;
        }
-       err = qgroup_fix_relocated_data_extents(trans, rc);
-       if (err < 0) {
-               btrfs_abort_transaction(trans, err);
+       ret = qgroup_fix_relocated_data_extents(trans, rc);
+       if (ret < 0) {
+               btrfs_abort_transaction(trans, ret);
+               if (!err)
+                       err = ret;
                goto out_free;
        }
        btrfs_commit_transaction(trans, rc->extent_root);
index efe129f..a87675f 100644 (file)
@@ -4268,10 +4268,12 @@ static int process_all_refs(struct send_ctx *sctx,
        }
        btrfs_release_path(path);
 
+       /*
+        * We don't actually care about pending_move as we are simply
+        * re-creating this inode and will be rename'ing it into place once we
+        * rename the parent directory.
+        */
        ret = process_recorded_refs(sctx, &pending_move);
-       /* Only applicable to an incremental send. */
-       ASSERT(pending_move == 0);
-
 out:
        btrfs_free_path(path);
        return ret;
index e935035..ef9c55b 100644 (file)
@@ -2867,6 +2867,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 
        if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
                blk_finish_plug(&plug);
+               list_del_init(&root_log_ctx.list);
                mutex_unlock(&log_root_tree->log_mutex);
                ret = root_log_ctx.log_ret;
                goto out;
index c64a0b7..df4b3e6 100644 (file)
@@ -597,7 +597,7 @@ static bool need_reset_readdir(struct ceph_file_info *fi, loff_t new_pos)
        if (is_hash_order(new_pos)) {
                /* no need to reset last_name for a forward seek when
                 * dentries are sotred in hash order */
-       } else if (fi->frag |= fpos_frag(new_pos)) {
+       } else if (fi->frag != fpos_frag(new_pos)) {
                return true;
        }
        rinfo = fi->last_readdir ? &fi->last_readdir->r_reply_info : NULL;
index 0f9961e..ed115ac 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/random.h>
 #include <linux/string.h>
 #include <linux/fscrypto.h>
+#include <linux/mount.h>
 
 static int inode_has_encryption_context(struct inode *inode)
 {
@@ -92,26 +93,42 @@ static int create_encryption_context_from_policy(struct inode *inode,
        return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL);
 }
 
-int fscrypt_process_policy(struct inode *inode,
+int fscrypt_process_policy(struct file *filp,
                                const struct fscrypt_policy *policy)
 {
+       struct inode *inode = file_inode(filp);
+       int ret;
+
+       if (!inode_owner_or_capable(inode))
+               return -EACCES;
+
        if (policy->version != 0)
                return -EINVAL;
 
+       ret = mnt_want_write_file(filp);
+       if (ret)
+               return ret;
+
        if (!inode_has_encryption_context(inode)) {
-               if (!inode->i_sb->s_cop->empty_dir)
-                       return -EOPNOTSUPP;
-               if (!inode->i_sb->s_cop->empty_dir(inode))
-                       return -ENOTEMPTY;
-               return create_encryption_context_from_policy(inode, policy);
+               if (!S_ISDIR(inode->i_mode))
+                       ret = -EINVAL;
+               else if (!inode->i_sb->s_cop->empty_dir)
+                       ret = -EOPNOTSUPP;
+               else if (!inode->i_sb->s_cop->empty_dir(inode))
+                       ret = -ENOTEMPTY;
+               else
+                       ret = create_encryption_context_from_policy(inode,
+                                                                   policy);
+       } else if (!is_encryption_context_consistent_with_policy(inode,
+                                                                policy)) {
+               printk(KERN_WARNING
+                      "%s: Policy inconsistent with encryption context\n",
+                      __func__);
+               ret = -EINVAL;
        }
 
-       if (is_encryption_context_consistent_with_policy(inode, policy))
-               return 0;
-
-       printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n",
-              __func__);
-       return -EINVAL;
+       mnt_drop_write_file(filp);
+       return ret;
 }
 EXPORT_SYMBOL(fscrypt_process_policy);
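
The reworked fscrypt_process_policy() above now carries the ownership check and the mnt_want_write_file()/mnt_drop_write_file() bracket itself, instead of relying on each filesystem caller (the f2fs hunk further down drops its now-redundant copy). A hedged sketch of that common shape for an ioctl-style handler that modifies the filesystem; the my_apply_setting() step is a placeholder for the real work:

	#include <linux/errno.h>
	#include <linux/fs.h>
	#include <linux/mount.h>

	/* Placeholder for the actual state change an ioctl would make. */
	static int my_apply_setting(struct inode *inode)
	{
		return 0;
	}

	static int my_ioctl_set(struct file *filp)
	{
		struct inode *inode = file_inode(filp);
		int ret;

		if (!inode_owner_or_capable(inode))
			return -EACCES;

		ret = mnt_want_write_file(filp);	/* pairs with the drop below */
		if (ret)
			return ret;

		ret = my_apply_setting(inode);

		mnt_drop_write_file(filp);
		return ret;
	}
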
 
index d116453..79a5941 100644 (file)
@@ -585,7 +585,8 @@ struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
  */
 void *devpts_get_priv(struct dentry *dentry)
 {
-       WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
+       if (dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC)
+               return NULL;
        return dentry->d_fsdata;
 }
 
index 10686fd..1bb7df5 100644 (file)
@@ -776,7 +776,7 @@ resizefs_out:
                                   (struct fscrypt_policy __user *)arg,
                                   sizeof(policy)))
                        return -EFAULT;
-               return fscrypt_process_policy(inode, &policy);
+               return fscrypt_process_policy(filp, &policy);
 #else
                return -EOPNOTSUPP;
 #endif
index 47abb96..28f4f4c 100644 (file)
@@ -1757,21 +1757,14 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
 {
        struct fscrypt_policy policy;
        struct inode *inode = file_inode(filp);
-       int ret;
 
        if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg,
                                                        sizeof(policy)))
                return -EFAULT;
 
-       ret = mnt_want_write_file(filp);
-       if (ret)
-               return ret;
-
        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
-       ret = fscrypt_process_policy(inode, &policy);
 
-       mnt_drop_write_file(filp);
-       return ret;
+       return fscrypt_process_policy(filp, &policy);
 }
 
 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
index f394aff..3988b43 100644 (file)
@@ -530,13 +530,13 @@ void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
        req->out.args[0].size = count;
 }
 
-static void fuse_release_user_pages(struct fuse_req *req, int write)
+static void fuse_release_user_pages(struct fuse_req *req, bool should_dirty)
 {
        unsigned i;
 
        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
-               if (write)
+               if (should_dirty)
                        set_page_dirty_lock(page);
                put_page(page);
        }
@@ -1320,6 +1320,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                       loff_t *ppos, int flags)
 {
        int write = flags & FUSE_DIO_WRITE;
+       bool should_dirty = !write && iter_is_iovec(iter);
        int cuse = flags & FUSE_DIO_CUSE;
        struct file *file = io->file;
        struct inode *inode = file->f_mapping->host;
@@ -1363,7 +1364,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                        nres = fuse_send_read(req, io, pos, nbytes, owner);
 
                if (!io->async)
-                       fuse_release_user_pages(req, !write);
+                       fuse_release_user_pages(req, should_dirty);
                if (req->out.h.error) {
                        err = req->out.h.error;
                        break;
index 0342254..706270f 100644 (file)
@@ -428,9 +428,12 @@ static int iomap_to_fiemap(struct fiemap_extent_info *fi,
                break;
        }
 
+       if (iomap->flags & IOMAP_F_MERGED)
+               flags |= FIEMAP_EXTENT_MERGED;
+
        return fiemap_fill_next_extent(fi, iomap->offset,
                        iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9: 0,
-                       iomap->length, flags | FIEMAP_EXTENT_MERGED);
+                       iomap->length, flags);
 
 }
 
index e157400..2bcb86e 100644 (file)
@@ -840,21 +840,35 @@ repeat:
        mutex_lock(&kernfs_mutex);
 
        list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
+               struct kernfs_node *parent;
                struct inode *inode;
-               struct dentry *dentry;
 
+               /*
+                * We want fsnotify_modify() on @kn but as the
+                * modifications aren't originating from userland don't
+                * have the matching @file available.  Look up the inodes
+                * and generate the events manually.
+                */
                inode = ilookup(info->sb, kn->ino);
                if (!inode)
                        continue;
 
-               dentry = d_find_any_alias(inode);
-               if (dentry) {
-                       fsnotify_parent(NULL, dentry, FS_MODIFY);
-                       fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
-                                NULL, 0);
-                       dput(dentry);
+               parent = kernfs_get_parent(kn);
+               if (parent) {
+                       struct inode *p_inode;
+
+                       p_inode = ilookup(info->sb, parent->ino);
+                       if (p_inode) {
+                               fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD,
+                                        inode, FSNOTIFY_EVENT_INODE, kn->name, 0);
+                               iput(p_inode);
+                       }
+
+                       kernfs_put(parent);
                }
 
+               fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
+                        kn->name, 0);
                iput(inode);
        }
 
index f55a4e7..2178476 100644 (file)
@@ -346,7 +346,7 @@ static void bl_write_cleanup(struct work_struct *work)
                        PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
 
                ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
-                                       (end - start) >> SECTOR_SHIFT);
+                                       (end - start) >> SECTOR_SHIFT, end);
        }
 
        pnfs_ld_write_done(hdr);
index 18e6fd0..efc007f 100644 (file)
@@ -141,6 +141,7 @@ struct pnfs_block_layout {
        struct rb_root          bl_ext_ro;
        spinlock_t              bl_ext_lock;   /* Protects list manipulation */
        bool                    bl_scsi_layout;
+       u64                     bl_lwb;
 };
 
 static inline struct pnfs_block_layout *
@@ -182,7 +183,7 @@ int ext_tree_insert(struct pnfs_block_layout *bl,
 int ext_tree_remove(struct pnfs_block_layout *bl, bool rw, sector_t start,
                sector_t end);
 int ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
-               sector_t len);
+               sector_t len, u64 lwb);
 bool ext_tree_lookup(struct pnfs_block_layout *bl, sector_t isect,
                struct pnfs_block_extent *ret, bool rw);
 int ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg);
index 992bcb1..c85fbfd 100644 (file)
@@ -402,7 +402,7 @@ ext_tree_split(struct rb_root *root, struct pnfs_block_extent *be,
 
 int
 ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
-               sector_t len)
+               sector_t len, u64 lwb)
 {
        struct rb_root *root = &bl->bl_ext_rw;
        sector_t end = start + len;
@@ -471,6 +471,8 @@ ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
                }
        }
 out:
+       if (bl->bl_lwb < lwb)
+               bl->bl_lwb = lwb;
        spin_unlock(&bl->bl_ext_lock);
 
        __ext_put_deviceids(&tmp);
@@ -518,7 +520,7 @@ static __be32 *encode_scsi_range(struct pnfs_block_extent *be, __be32 *p)
 }
 
 static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
-               size_t buffer_size, size_t *count)
+               size_t buffer_size, size_t *count, __u64 *lastbyte)
 {
        struct pnfs_block_extent *be;
        int ret = 0;
@@ -542,6 +544,8 @@ static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
                        p = encode_block_extent(be, p);
                be->be_tag = EXTENT_COMMITTING;
        }
+       *lastbyte = bl->bl_lwb - 1;
+       bl->bl_lwb = 0;
        spin_unlock(&bl->bl_ext_lock);
 
        return ret;
@@ -564,7 +568,7 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg)
        arg->layoutupdate_pages = &arg->layoutupdate_page;
 
 retry:
-       ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count);
+       ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count, &arg->lastbytewritten);
        if (unlikely(ret)) {
                ext_tree_free_commitdata(arg, buffer_size);
 
index a7f2e6e..52a2831 100644 (file)
@@ -275,6 +275,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
 err_socks:
        svc_rpcb_cleanup(serv, net);
 err_bind:
+       nn->cb_users[minorversion]--;
        dprintk("NFS: Couldn't create callback socket: err = %d; "
                        "net = %p\n", ret, net);
        return ret;
index c92a75e..f953ef6 100644 (file)
@@ -454,11 +454,8 @@ static bool referring_call_exists(struct nfs_client *clp,
                                ((u32 *)&rclist->rcl_sessionid.data)[3],
                                ref->rc_sequenceid, ref->rc_slotid);
 
-                       spin_lock(&tbl->slot_tbl_lock);
-                       status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
-                                 tbl->slots[ref->rc_slotid].seq_nr ==
-                                       ref->rc_sequenceid);
-                       spin_unlock(&tbl->slot_tbl_lock);
+                       status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
+                                       ref->rc_sequenceid, HZ >> 1) < 0;
                        if (status)
                                goto out;
                }
@@ -487,7 +484,6 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
                goto out;
 
        tbl = &clp->cl_session->bc_slot_table;
-       slot = tbl->slots + args->csa_slotid;
 
        /* Set up res before grabbing the spinlock */
        memcpy(&res->csr_sessionid, &args->csa_sessionid,
index 003ebce..1e10678 100644 (file)
@@ -426,7 +426,7 @@ EXPORT_SYMBOL_GPL(nfs_mark_client_ready);
  * Initialise the timeout values for a connection
  */
 void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
-                                   unsigned int timeo, unsigned int retrans)
+                                   int timeo, int retrans)
 {
        to->to_initval = timeo * HZ / 10;
        to->to_retries = retrans;
@@ -434,9 +434,9 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
        switch (proto) {
        case XPRT_TRANSPORT_TCP:
        case XPRT_TRANSPORT_RDMA:
-               if (to->to_retries == 0)
+               if (retrans == NFS_UNSPEC_RETRANS)
                        to->to_retries = NFS_DEF_TCP_RETRANS;
-               if (to->to_initval == 0)
+               if (timeo == NFS_UNSPEC_TIMEO || to->to_retries == 0)
                        to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
                if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
                        to->to_initval = NFS_MAX_TCP_TIMEOUT;
@@ -449,9 +449,9 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
                to->to_exponential = 0;
                break;
        case XPRT_TRANSPORT_UDP:
-               if (to->to_retries == 0)
+               if (retrans == NFS_UNSPEC_RETRANS)
                        to->to_retries = NFS_DEF_UDP_RETRANS;
-               if (!to->to_initval)
+               if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
                        to->to_initval = NFS_DEF_UDP_TIMEO * HZ / 10;
                if (to->to_initval > NFS_MAX_UDP_TIMEOUT)
                        to->to_initval = NFS_MAX_UDP_TIMEOUT;
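
The point of the nfs_init_timeout_values() change above is that 0 can no longer double as "not specified": callers now pass NFS_UNSPEC_TIMEO/NFS_UNSPEC_RETRANS sentinels (defined in the internal.h hunk further down) and the protocol defaults are applied only for those, so an explicit retrans=0 is no longer silently replaced by the default. A tiny detached sketch of the sentinel-versus-default idea:

	#include <limits.h>
	#include <stdio.h>

	#define UNSPEC		UINT_MAX	/* sentinel: option not given */
	#define DEF_RETRANS	2

	/* Distinguish "user asked for 0" from "user said nothing". */
	static unsigned int resolve_retrans(unsigned int requested)
	{
		return requested == UNSPEC ? DEF_RETRANS : requested;
	}

	int main(void)
	{
		printf("unspecified -> %u\n", resolve_retrans(UNSPEC));
		printf("retrans=0   -> %u\n", resolve_retrans(0));
		return 0;
	}
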
index e6206ea..51b5136 100644 (file)
@@ -37,6 +37,7 @@ ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
        if (ffl) {
                INIT_LIST_HEAD(&ffl->error_list);
                INIT_LIST_HEAD(&ffl->mirrors);
+               ffl->last_report_time = ktime_get();
                return &ffl->generic_hdr;
        } else
                return NULL;
@@ -640,19 +641,18 @@ nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
 {
        static const ktime_t notime = {0};
        s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
+       struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
 
        nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
        if (ktime_equal(mirror->start_time, notime))
                mirror->start_time = now;
-       if (ktime_equal(mirror->last_report_time, notime))
-               mirror->last_report_time = now;
        if (mirror->report_interval != 0)
                report_interval = (s64)mirror->report_interval * 1000LL;
        else if (layoutstats_timer != 0)
                report_interval = (s64)layoutstats_timer * 1000LL;
-       if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >=
+       if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
                        report_interval) {
-               mirror->last_report_time = now;
+               ffl->last_report_time = now;
                return true;
        }
 
@@ -806,11 +806,14 @@ ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
 {
        struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
        struct nfs4_pnfs_ds *ds;
+       bool fail_return = false;
        int idx;
 
        /* mirrors are sorted by efficiency */
        for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
-               ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
+               if (idx+1 == fls->mirror_array_cnt)
+                       fail_return = true;
+               ds = nfs4_ff_layout_prepare_ds(lseg, idx, fail_return);
                if (ds) {
                        *best_idx = idx;
                        return ds;
@@ -859,6 +862,7 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
        struct nfs4_pnfs_ds *ds;
        int ds_idx;
 
+retry:
        /* Use full layout for now */
        if (!pgio->pg_lseg)
                ff_layout_pg_get_read(pgio, req, false);
@@ -871,10 +875,13 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
 
        ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
        if (!ds) {
-               if (ff_layout_no_fallback_to_mds(pgio->pg_lseg))
-                       goto out_pnfs;
-               else
+               if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
                        goto out_mds;
+               pnfs_put_lseg(pgio->pg_lseg);
+               pgio->pg_lseg = NULL;
+               /* Sleep for 1 second before retrying */
+               ssleep(1);
+               goto retry;
        }
 
        mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
@@ -890,12 +897,6 @@ out_mds:
        pnfs_put_lseg(pgio->pg_lseg);
        pgio->pg_lseg = NULL;
        nfs_pageio_reset_read_mds(pgio);
-       return;
-
-out_pnfs:
-       pnfs_set_lo_fail(pgio->pg_lseg);
-       pnfs_put_lseg(pgio->pg_lseg);
-       pgio->pg_lseg = NULL;
 }
 
 static void
@@ -909,6 +910,7 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
        int i;
        int status;
 
+retry:
        if (!pgio->pg_lseg) {
                pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                                   req->wb_context,
@@ -940,10 +942,13 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
        for (i = 0; i < pgio->pg_mirror_count; i++) {
                ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
                if (!ds) {
-                       if (ff_layout_no_fallback_to_mds(pgio->pg_lseg))
-                               goto out_pnfs;
-                       else
+                       if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
                                goto out_mds;
+                       pnfs_put_lseg(pgio->pg_lseg);
+                       pgio->pg_lseg = NULL;
+                       /* Sleep for 1 second before retrying */
+                       ssleep(1);
+                       goto retry;
                }
                pgm = &pgio->pg_mirrors[i];
                mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
@@ -956,12 +961,6 @@ out_mds:
        pnfs_put_lseg(pgio->pg_lseg);
        pgio->pg_lseg = NULL;
        nfs_pageio_reset_write_mds(pgio);
-       return;
-
-out_pnfs:
-       pnfs_set_lo_fail(pgio->pg_lseg);
-       pnfs_put_lseg(pgio->pg_lseg);
-       pgio->pg_lseg = NULL;
 }
 
 static unsigned int
index 1bcdb15..3ee0c9f 100644 (file)
@@ -84,7 +84,6 @@ struct nfs4_ff_layout_mirror {
        struct nfs4_ff_layoutstat       read_stat;
        struct nfs4_ff_layoutstat       write_stat;
        ktime_t                         start_time;
-       ktime_t                         last_report_time;
        u32                             report_interval;
 };
 
@@ -101,6 +100,7 @@ struct nfs4_flexfile_layout {
        struct pnfs_ds_commit_info commit_info;
        struct list_head        mirrors;
        struct list_head        error_list; /* nfs4_ff_layout_ds_err */
+       ktime_t                 last_report_time; /* Layoutstat report times */
 };
 
 static inline struct nfs4_flexfile_layout *
index 0aa36be..f7a3f6b 100644 (file)
@@ -17,8 +17,8 @@
 
 #define NFSDBG_FACILITY                NFSDBG_PNFS_LD
 
-static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO;
-static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS;
+static unsigned int dataserver_timeo = NFS_DEF_TCP_RETRANS;
+static unsigned int dataserver_retrans;
 
 void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
 {
@@ -379,7 +379,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
 
        devid = &mirror->mirror_ds->id_node;
        if (ff_layout_test_devid_unavailable(devid))
-               goto out;
+               goto out_fail;
 
        ds = mirror->mirror_ds->ds;
        /* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
@@ -405,15 +405,16 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
                        mirror->mirror_ds->ds_versions[0].rsize = max_payload;
                if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
                        mirror->mirror_ds->ds_versions[0].wsize = max_payload;
-       } else {
-               ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
-                                        mirror, lseg->pls_range.offset,
-                                        lseg->pls_range.length, NFS4ERR_NXIO,
-                                        OP_ILLEGAL, GFP_NOIO);
-               if (fail_return || !ff_layout_has_available_ds(lseg))
-                       pnfs_error_mark_layout_for_return(ino, lseg);
-               ds = NULL;
+               goto out;
        }
+       ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
+                                mirror, lseg->pls_range.offset,
+                                lseg->pls_range.length, NFS4ERR_NXIO,
+                                OP_ILLEGAL, GFP_NOIO);
+out_fail:
+       if (fail_return || !ff_layout_has_available_ds(lseg))
+               pnfs_error_mark_layout_for_return(ino, lseg);
+       ds = NULL;
 out:
        return ds;
 }
index 7ce5e02..74935a1 100644 (file)
@@ -58,6 +58,9 @@ struct nfs_clone_mount {
  */
 #define NFS_UNSPEC_PORT                (-1)
 
+#define NFS_UNSPEC_RETRANS     (UINT_MAX)
+#define NFS_UNSPEC_TIMEO       (UINT_MAX)
+
 /*
  * Maximum number of pages that readdir can use for creating
  * a vmapped array of pages.
@@ -156,7 +159,7 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *,
 int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *, struct nfs_fattr *);
 void nfs_server_insert_lists(struct nfs_server *);
 void nfs_server_remove_lists(struct nfs_server *);
-void nfs_init_timeout_values(struct rpc_timeout *, int, unsigned int, unsigned int);
+void nfs_init_timeout_values(struct rpc_timeout *to, int proto, int timeo, int retrans);
 int nfs_init_server_rpcclient(struct nfs_server *, const struct rpc_timeout *t,
                rpc_authflavor_t);
 struct nfs_server *nfs_alloc_server(void);
index 6f47527..64b43b4 100644 (file)
@@ -318,10 +318,22 @@ static void
 nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
 {
        struct nfs42_layoutstat_data *data = calldata;
-       struct nfs_server *server = NFS_SERVER(data->args.inode);
+       struct inode *inode = data->inode;
+       struct nfs_server *server = NFS_SERVER(inode);
+       struct pnfs_layout_hdr *lo;
 
+       spin_lock(&inode->i_lock);
+       lo = NFS_I(inode)->layout;
+       if (!pnfs_layout_is_valid(lo)) {
+               spin_unlock(&inode->i_lock);
+               rpc_exit(task, 0);
+               return;
+       }
+       nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
+       spin_unlock(&inode->i_lock);
        nfs41_setup_sequence(nfs4_get_session(server), &data->args.seq_args,
                             &data->res.seq_res, task);
+
 }
 
 static void
@@ -341,11 +353,11 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
        case -NFS4ERR_ADMIN_REVOKED:
        case -NFS4ERR_DELEG_REVOKED:
        case -NFS4ERR_STALE_STATEID:
-       case -NFS4ERR_OLD_STATEID:
        case -NFS4ERR_BAD_STATEID:
                spin_lock(&inode->i_lock);
                lo = NFS_I(inode)->layout;
-               if (lo && nfs4_stateid_match(&data->args.stateid,
+               if (pnfs_layout_is_valid(lo) &&
+                   nfs4_stateid_match(&data->args.stateid,
                                             &lo->plh_stateid)) {
                        LIST_HEAD(head);
 
@@ -359,11 +371,23 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
                } else
                        spin_unlock(&inode->i_lock);
                break;
+       case -NFS4ERR_OLD_STATEID:
+               spin_lock(&inode->i_lock);
+               lo = NFS_I(inode)->layout;
+               if (pnfs_layout_is_valid(lo) &&
+                   nfs4_stateid_match_other(&data->args.stateid,
+                                       &lo->plh_stateid)) {
+                       /* Do we need to delay before resending? */
+                       if (!nfs4_stateid_is_newer(&lo->plh_stateid,
+                                               &data->args.stateid))
+                               rpc_delay(task, HZ);
+                       rpc_restart_call_prepare(task);
+               }
+               spin_unlock(&inode->i_lock);
+               break;
        case -ENOTSUPP:
        case -EOPNOTSUPP:
                NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
-       default:
-               break;
        }
 
        dprintk("%s server returns %d\n", __func__, task->tk_status);
index 8d7d08d..cd3b7cf 100644 (file)
@@ -817,6 +817,11 @@ static int nfs4_set_client(struct nfs_server *server,
                goto error;
        }
 
+       if (server->nfs_client == clp) {
+               error = -ELOOP;
+               goto error;
+       }
+
        /*
         * Query for the lease time on clientid setup or renewal
         *
index 1949bbd..f5aecaa 100644 (file)
@@ -634,15 +634,11 @@ out_sleep:
 }
 EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
 
-static int nfs40_sequence_done(struct rpc_task *task,
-                              struct nfs4_sequence_res *res)
+static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
 {
        struct nfs4_slot *slot = res->sr_slot;
        struct nfs4_slot_table *tbl;
 
-       if (slot == NULL)
-               goto out;
-
        tbl = slot->table;
        spin_lock(&tbl->slot_tbl_lock);
        if (!nfs41_wake_and_assign_slot(tbl, slot))
@@ -650,7 +646,13 @@ static int nfs40_sequence_done(struct rpc_task *task,
        spin_unlock(&tbl->slot_tbl_lock);
 
        res->sr_slot = NULL;
-out:
+}
+
+static int nfs40_sequence_done(struct rpc_task *task,
+                              struct nfs4_sequence_res *res)
+{
+       if (res->sr_slot != NULL)
+               nfs40_sequence_free_slot(res);
        return 1;
 }
 
@@ -666,6 +668,11 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
        tbl = slot->table;
        session = tbl->session;
 
+       /* Bump the slot sequence number */
+       if (slot->seq_done)
+               slot->seq_nr++;
+       slot->seq_done = 0;
+
        spin_lock(&tbl->slot_tbl_lock);
        /* Be nice to the server: try to ensure that the last transmitted
         * value for highest_user_slotid <= target_highest_slotid
@@ -686,9 +693,12 @@ out_unlock:
        res->sr_slot = NULL;
        if (send_new_highest_used_slotid)
                nfs41_notify_server(session->clp);
+       if (waitqueue_active(&tbl->slot_waitq))
+               wake_up_all(&tbl->slot_waitq);
 }
 
-int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
+static int nfs41_sequence_process(struct rpc_task *task,
+               struct nfs4_sequence_res *res)
 {
        struct nfs4_session *session;
        struct nfs4_slot *slot = res->sr_slot;
@@ -714,7 +724,7 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
        switch (res->sr_status) {
        case 0:
                /* Update the slot's sequence and clientid lease timer */
-               ++slot->seq_nr;
+               slot->seq_done = 1;
                clp = session->clp;
                do_renew_lease(clp, res->sr_timestamp);
                /* Check sequence flags */
@@ -769,16 +779,16 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
                goto retry_nowait;
        default:
                /* Just update the slot sequence no. */
-               ++slot->seq_nr;
+               slot->seq_done = 1;
        }
 out:
        /* The session may be reset by one of the error handlers. */
        dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
-       nfs41_sequence_free_slot(res);
 out_noaction:
        return ret;
 retry_nowait:
        if (rpc_restart_call_prepare(task)) {
+               nfs41_sequence_free_slot(res);
                task->tk_status = 0;
                ret = 0;
        }
@@ -789,8 +799,37 @@ out_retry:
        rpc_delay(task, NFS4_POLL_RETRY_MAX);
        return 0;
 }
+
+int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
+{
+       if (!nfs41_sequence_process(task, res))
+               return 0;
+       if (res->sr_slot != NULL)
+               nfs41_sequence_free_slot(res);
+       return 1;
+
+}
 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
 
+static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
+{
+       if (res->sr_slot == NULL)
+               return 1;
+       if (res->sr_slot->table->session != NULL)
+               return nfs41_sequence_process(task, res);
+       return nfs40_sequence_done(task, res);
+}
+
+static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
+{
+       if (res->sr_slot != NULL) {
+               if (res->sr_slot->table->session != NULL)
+                       nfs41_sequence_free_slot(res);
+               else
+                       nfs40_sequence_free_slot(res);
+       }
+}
+
 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
 {
        if (res->sr_slot == NULL)
@@ -920,6 +959,17 @@ static int nfs4_setup_sequence(const struct nfs_server *server,
                                    args, res, task);
 }
 
+static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
+{
+       return nfs40_sequence_done(task, res);
+}
+
+static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
+{
+       if (res->sr_slot != NULL)
+               nfs40_sequence_free_slot(res);
+}
+
 int nfs4_sequence_done(struct rpc_task *task,
                       struct nfs4_sequence_res *res)
 {
@@ -1197,6 +1247,7 @@ static void nfs4_opendata_free(struct kref *kref)
        struct super_block *sb = p->dentry->d_sb;
 
        nfs_free_seqid(p->o_arg.seqid);
+       nfs4_sequence_free_slot(&p->o_res.seq_res);
        if (p->state != NULL)
                nfs4_put_open_state(p->state);
        nfs4_put_state_owner(p->owner);
@@ -1656,9 +1707,14 @@ err:
 static struct nfs4_state *
 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
 {
+       struct nfs4_state *ret;
+
        if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
-               return _nfs4_opendata_reclaim_to_nfs4_state(data);
-       return _nfs4_opendata_to_nfs4_state(data);
+               ret = _nfs4_opendata_reclaim_to_nfs4_state(data);
+       else
+               ret = _nfs4_opendata_to_nfs4_state(data);
+       nfs4_sequence_free_slot(&data->o_res.seq_res);
+       return ret;
 }
 
 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
@@ -2056,7 +2112,7 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
 
        data->rpc_status = task->tk_status;
 
-       if (!nfs4_sequence_done(task, &data->o_res.seq_res))
+       if (!nfs4_sequence_process(task, &data->o_res.seq_res))
                return;
 
        if (task->tk_status == 0) {
@@ -7864,7 +7920,7 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
        struct nfs4_layoutget *lgp = calldata;
 
        dprintk("--> %s\n", __func__);
-       nfs41_sequence_done(task, &lgp->res.seq_res);
+       nfs41_sequence_process(task, &lgp->res.seq_res);
        dprintk("<-- %s\n", __func__);
 }
 
@@ -8080,6 +8136,7 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
        /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
        if (status == 0 && lgp->res.layoutp->len)
                lseg = pnfs_layout_process(lgp);
+       nfs4_sequence_free_slot(&lgp->res.seq_res);
        rpc_put_task(task);
        dprintk("<-- %s status=%d\n", __func__, status);
        if (status)
@@ -8106,7 +8163,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
 
        dprintk("--> %s\n", __func__);
 
-       if (!nfs41_sequence_done(task, &lrp->res.seq_res))
+       if (!nfs41_sequence_process(task, &lrp->res.seq_res))
                return;
 
        server = NFS_SERVER(lrp->args.inode);
@@ -8118,6 +8175,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
        case -NFS4ERR_DELAY:
                if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
                        break;
+               nfs4_sequence_free_slot(&lrp->res.seq_res);
                rpc_restart_call_prepare(task);
                return;
        }
@@ -8138,6 +8196,7 @@ static void nfs4_layoutreturn_release(void *calldata)
                pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
        pnfs_clear_layoutreturn_waitbit(lo);
        spin_unlock(&lo->plh_inode->i_lock);
+       nfs4_sequence_free_slot(&lrp->res.seq_res);
        pnfs_free_lseg_list(&freeme);
        pnfs_put_layout_hdr(lrp->args.layout);
        nfs_iput_and_deactive(lrp->inode);
index 332d06e..b629730 100644 (file)
@@ -28,6 +28,7 @@ static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue)
        tbl->highest_used_slotid = NFS4_NO_SLOT;
        spin_lock_init(&tbl->slot_tbl_lock);
        rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue);
+       init_waitqueue_head(&tbl->slot_waitq);
        init_completion(&tbl->complete);
 }
 
@@ -172,6 +173,58 @@ struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid)
        return ERR_PTR(-E2BIG);
 }
 
+static int nfs4_slot_get_seqid(struct nfs4_slot_table  *tbl, u32 slotid,
+               u32 *seq_nr)
+       __must_hold(&tbl->slot_tbl_lock)
+{
+       struct nfs4_slot *slot;
+
+       slot = nfs4_lookup_slot(tbl, slotid);
+       if (IS_ERR(slot))
+               return PTR_ERR(slot);
+       *seq_nr = slot->seq_nr;
+       return 0;
+}
+
+/*
+ * nfs4_slot_seqid_in_use - test if a slot sequence id is still in use
+ *
+ * Given a slot table, slot id and sequence number, determine if the
+ * RPC call in question is still in flight. This function is mainly
+ * intended for use by the callback channel.
+ */
+static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl,
+               u32 slotid, u32 seq_nr)
+{
+       u32 cur_seq;
+       bool ret = false;
+
+       spin_lock(&tbl->slot_tbl_lock);
+       if (nfs4_slot_get_seqid(tbl, slotid, &cur_seq) == 0 &&
+           cur_seq == seq_nr && test_bit(slotid, tbl->used_slots))
+               ret = true;
+       spin_unlock(&tbl->slot_tbl_lock);
+       return ret;
+}
+
+/*
+ * nfs4_slot_wait_on_seqid - wait until a slot sequence id is complete
+ *
+ * Given a slot table, slot id and sequence number, wait until the
+ * corresponding RPC call completes. This function is mainly
+ * intended for use by the callback channel.
+ */
+int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
+               u32 slotid, u32 seq_nr,
+               unsigned long timeout)
+{
+       if (wait_event_timeout(tbl->slot_waitq,
+                       !nfs4_slot_seqid_in_use(tbl, slotid, seq_nr),
+                       timeout) == 0)
+               return -ETIMEDOUT;
+       return 0;
+}
+
 /*
  * nfs4_alloc_slot - efficiently look for a free slot
  *
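
A minimal sketch of how a caller on the callback channel might use the new
nfs4_slot_wait_on_seqid() helper; the wrapper name and the one-second timeout
below are illustrative assumptions, not part of this patch:

/* Hypothetical caller, for illustration only: wait up to one second for
 * the RPC using (slotid, seq_nr) to complete before touching its state.
 */
static int demo_wait_for_slot(struct nfs4_slot_table *tbl, u32 slotid, u32 seq_nr)
{
        int err = nfs4_slot_wait_on_seqid(tbl, slotid, seq_nr, HZ);

        if (err == -ETIMEDOUT)
                pr_debug("slot %u seq %u still in flight\n", slotid, seq_nr);
        return err;
}
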
index 5b51298..f703b75 100644 (file)
@@ -21,7 +21,8 @@ struct nfs4_slot {
        unsigned long           generation;
        u32                     slot_nr;
        u32                     seq_nr;
-       unsigned int            interrupted : 1;
+       unsigned int            interrupted : 1,
+                               seq_done : 1;
 };
 
 /* Sessions */
@@ -36,6 +37,7 @@ struct nfs4_slot_table {
        unsigned long   used_slots[SLOT_TABLE_SZ]; /* used/unused bitmap */
        spinlock_t      slot_tbl_lock;
        struct rpc_wait_queue   slot_tbl_waitq; /* allocators may wait here */
+       wait_queue_head_t       slot_waitq;     /* Completion wait on slot */
        u32             max_slots;              /* # slots in table */
        u32             max_slotid;             /* Max allowed slotid value */
        u32             highest_used_slotid;    /* sent to server on each SEQ.
@@ -78,6 +80,9 @@ extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
 extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
 extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
 extern struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid);
+extern int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
+               u32 slotid, u32 seq_nr,
+               unsigned long timeout);
 extern bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
 extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
 extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
index 70806ca..6daf034 100644 (file)
@@ -1555,6 +1555,7 @@ pnfs_update_layout(struct inode *ino,
        }
 
 lookup_again:
+       nfs4_client_recover_expired_lease(clp);
        first = false;
        spin_lock(&ino->i_lock);
        lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
@@ -2510,7 +2511,6 @@ pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
 
        data->args.fh = NFS_FH(inode);
        data->args.inode = inode;
-       nfs4_stateid_copy(&data->args.stateid, &hdr->plh_stateid);
        status = ld->prepare_layoutstats(&data->args);
        if (status)
                goto out_free;
index 18d446e..d396013 100644 (file)
@@ -923,6 +923,8 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(void)
 
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (data) {
+               data->timeo             = NFS_UNSPEC_TIMEO;
+               data->retrans           = NFS_UNSPEC_RETRANS;
                data->acregmin          = NFS_DEF_ACREGMIN;
                data->acregmax          = NFS_DEF_ACREGMAX;
                data->acdirmin          = NFS_DEF_ACDIRMIN;
@@ -1189,6 +1191,19 @@ static int nfs_get_option_ul(substring_t args[], unsigned long *option)
        return rc;
 }
 
+static int nfs_get_option_ul_bound(substring_t args[], unsigned long *option,
+               unsigned long l_bound, unsigned long u_bound)
+{
+       int ret;
+
+       ret = nfs_get_option_ul(args, option);
+       if (ret != 0)
+               return ret;
+       if (*option < l_bound || *option > u_bound)
+               return -ERANGE;
+       return 0;
+}
+
 /*
  * Error-check and convert a string of mount options from user space into
  * a data structure.  The whole mount string is processed; bad options are
@@ -1352,12 +1367,12 @@ static int nfs_parse_mount_options(char *raw,
                        mnt->bsize = option;
                        break;
                case Opt_timeo:
-                       if (nfs_get_option_ul(args, &option) || option == 0)
+                       if (nfs_get_option_ul_bound(args, &option, 1, INT_MAX))
                                goto out_invalid_value;
                        mnt->timeo = option;
                        break;
                case Opt_retrans:
-                       if (nfs_get_option_ul(args, &option) || option == 0)
+                       if (nfs_get_option_ul_bound(args, &option, 0, INT_MAX))
                                goto out_invalid_value;
                        mnt->retrans = option;
                        break;
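
To make the new bounds concrete, here is a small stand-alone sketch; parse_bounded()
is a made-up user-space stand-in for nfs_get_option_ul_bound(), not kernel code. With
timeo bounded to 1..INT_MAX and retrans to 0..INT_MAX, "retrans=0" is now accepted
while "timeo=0" is still rejected:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for nfs_get_option_ul_bound(): parse, then range-check. */
static int parse_bounded(const char *s, unsigned long lo, unsigned long hi,
                         unsigned long *out)
{
        char *end;
        unsigned long v;

        errno = 0;
        v = strtoul(s, &end, 10);
        if (errno || *end != '\0')
                return -EINVAL;
        if (v < lo || v > hi)
                return -ERANGE;
        *out = v;
        return 0;
}

int main(void)
{
        unsigned long v;

        printf("timeo=0   -> %d\n", parse_bounded("0", 1, INT_MAX, &v)); /* -ERANGE */
        printf("retrans=0 -> %d\n", parse_bounded("0", 0, INT_MAX, &v)); /* 0 */
        return 0;
}
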
index 54e5d66..43fdc27 100644 (file)
@@ -80,6 +80,8 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
        }
 
        for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
+               if (ovl_is_private_xattr(name))
+                       continue;
 retry:
                size = vfs_getxattr(old, name, value, value_size);
                if (size == -ERANGE)
index 12bcd07..1560fdc 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/xattr.h>
 #include <linux/security.h>
 #include <linux/cred.h>
+#include <linux/posix_acl.h>
+#include <linux/posix_acl_xattr.h>
 #include "overlayfs.h"
 
 void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
@@ -186,6 +188,9 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
        struct dentry *newdentry;
        int err;
 
+       if (!hardlink && !IS_POSIXACL(udir))
+               stat->mode &= ~current_umask();
+
        inode_lock_nested(udir, I_MUTEX_PARENT);
        newdentry = lookup_one_len(dentry->d_name.name, upperdir,
                                   dentry->d_name.len);
@@ -335,6 +340,32 @@ out_free:
        return ret;
 }
 
+static int ovl_set_upper_acl(struct dentry *upperdentry, const char *name,
+                            const struct posix_acl *acl)
+{
+       void *buffer;
+       size_t size;
+       int err;
+
+       if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !acl)
+               return 0;
+
+       size = posix_acl_to_xattr(NULL, acl, NULL, 0);
+       buffer = kmalloc(size, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
+       size = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
+       err = size;
+       if (err < 0)
+               goto out_free;
+
+       err = vfs_setxattr(upperdentry, name, buffer, size, XATTR_CREATE);
+out_free:
+       kfree(buffer);
+       return err;
+}
+
 static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
                                    struct kstat *stat, const char *link,
                                    struct dentry *hardlink)
@@ -346,10 +377,18 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
        struct dentry *upper;
        struct dentry *newdentry;
        int err;
+       struct posix_acl *acl, *default_acl;
 
        if (WARN_ON(!workdir))
                return -EROFS;
 
+       if (!hardlink) {
+               err = posix_acl_create(dentry->d_parent->d_inode,
+                                      &stat->mode, &default_acl, &acl);
+               if (err)
+                       return err;
+       }
+
        err = ovl_lock_rename_workdir(workdir, upperdir);
        if (err)
                goto out;
@@ -384,6 +423,17 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
                if (err)
                        goto out_cleanup;
        }
+       if (!hardlink) {
+               err = ovl_set_upper_acl(newdentry, XATTR_NAME_POSIX_ACL_ACCESS,
+                                       acl);
+               if (err)
+                       goto out_cleanup;
+
+               err = ovl_set_upper_acl(newdentry, XATTR_NAME_POSIX_ACL_DEFAULT,
+                                       default_acl);
+               if (err)
+                       goto out_cleanup;
+       }
 
        if (!hardlink && S_ISDIR(stat->mode)) {
                err = ovl_set_opaque(newdentry);
@@ -410,6 +460,10 @@ out_dput:
 out_unlock:
        unlock_rename(workdir, upperdir);
 out:
+       if (!hardlink) {
+               posix_acl_release(acl);
+               posix_acl_release(default_acl);
+       }
        return err;
 
 out_cleanup:
@@ -950,9 +1004,9 @@ const struct inode_operations ovl_dir_inode_operations = {
        .permission     = ovl_permission,
        .getattr        = ovl_dir_getattr,
        .setxattr       = generic_setxattr,
-       .getxattr       = ovl_getxattr,
+       .getxattr       = generic_getxattr,
        .listxattr      = ovl_listxattr,
-       .removexattr    = ovl_removexattr,
+       .removexattr    = generic_removexattr,
        .get_acl        = ovl_get_acl,
        .update_time    = ovl_update_time,
 };
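
ovl_set_upper_acl() above uses the usual "measure, allocate, fill" idiom around
posix_acl_to_xattr(): a first call with a NULL buffer returns the encoded size,
the second call serializes into the allocated buffer. A tiny user-space sketch of
the same idiom, with snprintf() standing in for the serializer (all names invented):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *name = "default_acl";
        int size;
        char *buf;

        /* First pass: NULL buffer, only measure the encoded size. */
        size = snprintf(NULL, 0, "acl:%s", name);
        if (size < 0)
                return 1;

        buf = malloc(size + 1);
        if (!buf)
                return 1;

        /* Second pass: actually serialize into the buffer. */
        snprintf(buf, size + 1, "acl:%s", name);
        printf("%s (%d bytes)\n", buf, size);
        free(buf);
        return 0;
}
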
index 1b885c1..c75625c 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/xattr.h>
+#include <linux/posix_acl.h>
 #include "overlayfs.h"
 
 static int ovl_copy_up_truncate(struct dentry *dentry)
@@ -191,32 +192,44 @@ static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
        return err;
 }
 
-static bool ovl_is_private_xattr(const char *name)
+bool ovl_is_private_xattr(const char *name)
 {
-#define OVL_XATTR_PRE_NAME OVL_XATTR_PREFIX "."
-       return strncmp(name, OVL_XATTR_PRE_NAME,
-                      sizeof(OVL_XATTR_PRE_NAME) - 1) == 0;
+       return strncmp(name, OVL_XATTR_PREFIX,
+                      sizeof(OVL_XATTR_PREFIX) - 1) == 0;
 }
 
-int ovl_setxattr(struct dentry *dentry, struct inode *inode,
-                const char *name, const void *value,
-                size_t size, int flags)
+int ovl_xattr_set(struct dentry *dentry, const char *name, const void *value,
+                 size_t size, int flags)
 {
        int err;
-       struct dentry *upperdentry;
+       struct path realpath;
+       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
        const struct cred *old_cred;
 
        err = ovl_want_write(dentry);
        if (err)
                goto out;
 
+       if (!value && !OVL_TYPE_UPPER(type)) {
+               err = vfs_getxattr(realpath.dentry, name, NULL, 0);
+               if (err < 0)
+                       goto out_drop_write;
+       }
+
        err = ovl_copy_up(dentry);
        if (err)
                goto out_drop_write;
 
-       upperdentry = ovl_dentry_upper(dentry);
+       if (!OVL_TYPE_UPPER(type))
+               ovl_path_upper(dentry, &realpath);
+
        old_cred = ovl_override_creds(dentry->d_sb);
-       err = vfs_setxattr(upperdentry, name, value, size, flags);
+       if (value)
+               err = vfs_setxattr(realpath.dentry, name, value, size, flags);
+       else {
+               WARN_ON(flags != XATTR_REPLACE);
+               err = vfs_removexattr(realpath.dentry, name);
+       }
        revert_creds(old_cred);
 
 out_drop_write:
@@ -225,16 +238,13 @@ out:
        return err;
 }
 
-ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
-                    const char *name, void *value, size_t size)
+int ovl_xattr_get(struct dentry *dentry, const char *name,
+                 void *value, size_t size)
 {
        struct dentry *realdentry = ovl_dentry_real(dentry);
        ssize_t res;
        const struct cred *old_cred;
 
-       if (ovl_is_private_xattr(name))
-               return -ENODATA;
-
        old_cred = ovl_override_creds(dentry->d_sb);
        res = vfs_getxattr(realdentry, name, value, size);
        revert_creds(old_cred);
@@ -245,7 +255,8 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 {
        struct dentry *realdentry = ovl_dentry_real(dentry);
        ssize_t res;
-       int off;
+       size_t len;
+       char *s;
        const struct cred *old_cred;
 
        old_cred = ovl_override_creds(dentry->d_sb);
@@ -255,73 +266,39 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
                return res;
 
        /* filter out private xattrs */
-       for (off = 0; off < res;) {
-               char *s = list + off;
-               size_t slen = strlen(s) + 1;
+       for (s = list, len = res; len;) {
+               size_t slen = strnlen(s, len) + 1;
 
-               BUG_ON(off + slen > res);
+               /* underlying fs providing us with a broken xattr list? */
+               if (WARN_ON(slen > len))
+                       return -EIO;
 
+               len -= slen;
                if (ovl_is_private_xattr(s)) {
                        res -= slen;
-                       memmove(s, s + slen, res - off);
+                       memmove(s, s + slen, len);
                } else {
-                       off += slen;
+                       s += slen;
                }
        }
 
        return res;
 }
 
-int ovl_removexattr(struct dentry *dentry, const char *name)
-{
-       int err;
-       struct path realpath;
-       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
-       const struct cred *old_cred;
-
-       err = ovl_want_write(dentry);
-       if (err)
-               goto out;
-
-       err = -ENODATA;
-       if (ovl_is_private_xattr(name))
-               goto out_drop_write;
-
-       if (!OVL_TYPE_UPPER(type)) {
-               err = vfs_getxattr(realpath.dentry, name, NULL, 0);
-               if (err < 0)
-                       goto out_drop_write;
-
-               err = ovl_copy_up(dentry);
-               if (err)
-                       goto out_drop_write;
-
-               ovl_path_upper(dentry, &realpath);
-       }
-
-       old_cred = ovl_override_creds(dentry->d_sb);
-       err = vfs_removexattr(realpath.dentry, name);
-       revert_creds(old_cred);
-out_drop_write:
-       ovl_drop_write(dentry);
-out:
-       return err;
-}
-
 struct posix_acl *ovl_get_acl(struct inode *inode, int type)
 {
        struct inode *realinode = ovl_inode_real(inode, NULL);
        const struct cred *old_cred;
        struct posix_acl *acl;
 
-       if (!IS_POSIXACL(realinode))
+       if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
                return NULL;
 
        if (!realinode->i_op->get_acl)
                return NULL;
 
        old_cred = ovl_override_creds(inode->i_sb);
-       acl = realinode->i_op->get_acl(realinode, type);
+       acl = get_acl(realinode, type);
        revert_creds(old_cred);
 
        return acl;
@@ -391,9 +368,9 @@ static const struct inode_operations ovl_file_inode_operations = {
        .permission     = ovl_permission,
        .getattr        = ovl_getattr,
        .setxattr       = generic_setxattr,
-       .getxattr       = ovl_getxattr,
+       .getxattr       = generic_getxattr,
        .listxattr      = ovl_listxattr,
-       .removexattr    = ovl_removexattr,
+       .removexattr    = generic_removexattr,
        .get_acl        = ovl_get_acl,
        .update_time    = ovl_update_time,
 };
@@ -404,9 +381,9 @@ static const struct inode_operations ovl_symlink_inode_operations = {
        .readlink       = ovl_readlink,
        .getattr        = ovl_getattr,
        .setxattr       = generic_setxattr,
-       .getxattr       = ovl_getxattr,
+       .getxattr       = generic_getxattr,
        .listxattr      = ovl_listxattr,
-       .removexattr    = ovl_removexattr,
+       .removexattr    = generic_removexattr,
        .update_time    = ovl_update_time,
 };
 
@@ -415,6 +392,9 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode)
        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_flags |= S_NOCMTIME;
+#ifdef CONFIG_FS_POSIX_ACL
+       inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
+#endif
 
        mode &= S_IFMT;
        switch (mode) {
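
The reworked ovl_listxattr() loop above compacts a NUL-separated name list in place.
A stand-alone user-space sketch of that filtering idiom follows; the sample names are
invented, only the prefix matches OVL_XATTR_PREFIX:

#include <stdio.h>
#include <string.h>

/* Remove every name starting with pfx from a NUL-separated list,
 * compacting the buffer in place; returns the new total length.
 */
static size_t filter_names(char *list, size_t total, const char *pfx)
{
        size_t plen = strlen(pfx);
        size_t len = total;
        char *s = list;

        while (len) {
                size_t slen = strnlen(s, len) + 1;

                if (slen > len)         /* malformed list: stop */
                        break;
                len -= slen;
                if (!strncmp(s, pfx, plen)) {
                        total -= slen;
                        memmove(s, s + slen, len);
                } else {
                        s += slen;
                }
        }
        return total;
}

int main(void)
{
        char buf[] = "user.a\0trusted.overlay.opaque\0user.b\0";
        size_t n = filter_names(buf, sizeof(buf) - 1, "trusted.overlay.");

        for (size_t off = 0; off < n; off += strlen(buf + off) + 1)
                puts(buf + off);        /* prints "user.a" and "user.b" */
        return 0;
}
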
index e4f5c95..5813ccf 100644 (file)
@@ -24,8 +24,8 @@ enum ovl_path_type {
        (OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type))
 
 
-#define OVL_XATTR_PREFIX XATTR_TRUSTED_PREFIX "overlay"
-#define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX ".opaque"
+#define OVL_XATTR_PREFIX XATTR_TRUSTED_PREFIX "overlay."
+#define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque"
 
 #define OVL_ISUPPER_MASK 1UL
 
@@ -179,20 +179,21 @@ int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list);
 void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list);
 void ovl_cache_free(struct list_head *list);
 int ovl_check_d_type_supported(struct path *realpath);
+void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
+                        struct dentry *dentry, int level);
 
 /* inode.c */
 int ovl_setattr(struct dentry *dentry, struct iattr *attr);
 int ovl_permission(struct inode *inode, int mask);
-int ovl_setxattr(struct dentry *dentry, struct inode *inode,
-                const char *name, const void *value,
-                size_t size, int flags);
-ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
-                    const char *name, void *value, size_t size);
+int ovl_xattr_set(struct dentry *dentry, const char *name, const void *value,
+                 size_t size, int flags);
+int ovl_xattr_get(struct dentry *dentry, const char *name,
+                 void *value, size_t size);
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
-int ovl_removexattr(struct dentry *dentry, const char *name);
 struct posix_acl *ovl_get_acl(struct inode *inode, int type);
 int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags);
 int ovl_update_time(struct inode *inode, struct timespec *ts, int flags);
+bool ovl_is_private_xattr(const char *name);
 
 struct inode *ovl_new_inode(struct super_block *sb, umode_t mode);
 struct inode *ovl_get_inode(struct super_block *sb, struct inode *realinode);
index cf37fc7..f241b4e 100644 (file)
@@ -248,7 +248,7 @@ static inline int ovl_dir_read(struct path *realpath,
                        err = rdd->err;
        } while (!err && rdd->count);
 
-       if (!err && rdd->first_maybe_whiteout)
+       if (!err && rdd->first_maybe_whiteout && rdd->dentry)
                err = ovl_check_whiteouts(realpath->dentry, rdd);
 
        fput(realfile);
@@ -606,3 +606,64 @@ int ovl_check_d_type_supported(struct path *realpath)
 
        return rdd.d_type_supported;
 }
+
+static void ovl_workdir_cleanup_recurse(struct path *path, int level)
+{
+       int err;
+       struct inode *dir = path->dentry->d_inode;
+       LIST_HEAD(list);
+       struct ovl_cache_entry *p;
+       struct ovl_readdir_data rdd = {
+               .ctx.actor = ovl_fill_merge,
+               .dentry = NULL,
+               .list = &list,
+               .root = RB_ROOT,
+               .is_lowest = false,
+       };
+
+       err = ovl_dir_read(path, &rdd);
+       if (err)
+               goto out;
+
+       inode_lock_nested(dir, I_MUTEX_PARENT);
+       list_for_each_entry(p, &list, l_node) {
+               struct dentry *dentry;
+
+               if (p->name[0] == '.') {
+                       if (p->len == 1)
+                               continue;
+                       if (p->len == 2 && p->name[1] == '.')
+                               continue;
+               }
+               dentry = lookup_one_len(p->name, path->dentry, p->len);
+               if (IS_ERR(dentry))
+                       continue;
+               if (dentry->d_inode)
+                       ovl_workdir_cleanup(dir, path->mnt, dentry, level);
+               dput(dentry);
+       }
+       inode_unlock(dir);
+out:
+       ovl_cache_free(&list);
+}
+
+void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
+                        struct dentry *dentry, int level)
+{
+       int err;
+
+       if (!d_is_dir(dentry) || level > 1) {
+               ovl_cleanup(dir, dentry);
+               return;
+       }
+
+       err = ovl_do_rmdir(dir, dentry);
+       if (err) {
+               struct path path = { .mnt = mnt, .dentry = dentry };
+
+               inode_unlock(dir);
+               ovl_workdir_cleanup_recurse(&path, level + 1);
+               inode_lock_nested(dir, I_MUTEX_PARENT);
+               ovl_cleanup(dir, dentry);
+       }
+}
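
ovl_workdir_cleanup() above tries rmdir first and only recurses, at most one level
deep, when the directory turns out to be non-empty. A rough user-space sketch of the
same strategy, with invented names and none of the kernel's locking:

#include <dirent.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Try to remove a directory; if it is not empty, delete its contents and
 * try again, recursing at most one extra level (like the kernel helper).
 */
static void cleanup_dir(const char *path, int level)
{
        DIR *d;
        struct dirent *de;
        char child[4096];

        if (rmdir(path) == 0 || level > 1)
                return;

        d = opendir(path);
        if (!d)
                return;
        while ((de = readdir(d)) != NULL) {
                if (!strcmp(de->d_name, ".") || !strcmp(de->d_name, ".."))
                        continue;
                snprintf(child, sizeof(child), "%s/%s", path, de->d_name);
                /* d_type may be DT_UNKNOWN on some filesystems; a robust
                 * version would fall back to stat() here.
                 */
                if (de->d_type == DT_DIR)
                        cleanup_dir(child, level + 1);
                else
                        unlink(child);
        }
        closedir(d);
        rmdir(path);
}
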
index 4036132..e2a94a2 100644 (file)
@@ -814,6 +814,10 @@ retry:
                struct kstat stat = {
                        .mode = S_IFDIR | 0,
                };
+               struct iattr attr = {
+                       .ia_valid = ATTR_MODE,
+                       .ia_mode = stat.mode,
+               };
 
                if (work->d_inode) {
                        err = -EEXIST;
@@ -821,7 +825,7 @@ retry:
                                goto out_dput;
 
                        retried = true;
-                       ovl_cleanup(dir, work);
+                       ovl_workdir_cleanup(dir, mnt, work, 0);
                        dput(work);
                        goto retry;
                }
@@ -829,6 +833,21 @@ retry:
                err = ovl_create_real(dir, work, &stat, NULL, NULL, true);
                if (err)
                        goto out_dput;
+
+               err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_DEFAULT);
+               if (err && err != -ENODATA && err != -EOPNOTSUPP)
+                       goto out_dput;
+
+               err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_ACCESS);
+               if (err && err != -ENODATA && err != -EOPNOTSUPP)
+                       goto out_dput;
+
+               /* Clear any inherited mode bits */
+               inode_lock(work->d_inode);
+               err = notify_change(work, &attr, NULL);
+               inode_unlock(work->d_inode);
+               if (err)
+                       goto out_dput;
        }
 out_unlock:
        inode_unlock(dir);
@@ -967,10 +986,19 @@ static unsigned int ovl_split_lowerdirs(char *str)
        return ctr;
 }
 
-static int ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
-                                  struct dentry *dentry, struct inode *inode,
-                                  const char *name, const void *value,
-                                  size_t size, int flags)
+static int __maybe_unused
+ovl_posix_acl_xattr_get(const struct xattr_handler *handler,
+                       struct dentry *dentry, struct inode *inode,
+                       const char *name, void *buffer, size_t size)
+{
+       return ovl_xattr_get(dentry, handler->name, buffer, size);
+}
+
+static int __maybe_unused
+ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
+                       struct dentry *dentry, struct inode *inode,
+                       const char *name, const void *value,
+                       size_t size, int flags)
 {
        struct dentry *workdir = ovl_workdir(dentry);
        struct inode *realinode = ovl_inode_real(inode, NULL);
@@ -998,19 +1026,22 @@ static int ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
 
        posix_acl_release(acl);
 
-       return ovl_setxattr(dentry, inode, handler->name, value, size, flags);
+       err = ovl_xattr_set(dentry, handler->name, value, size, flags);
+       if (!err)
+               ovl_copyattr(ovl_inode_real(inode, NULL), inode);
+
+       return err;
 
 out_acl_release:
        posix_acl_release(acl);
        return err;
 }
 
-static int ovl_other_xattr_set(const struct xattr_handler *handler,
-                              struct dentry *dentry, struct inode *inode,
-                              const char *name, const void *value,
-                              size_t size, int flags)
+static int ovl_own_xattr_get(const struct xattr_handler *handler,
+                            struct dentry *dentry, struct inode *inode,
+                            const char *name, void *buffer, size_t size)
 {
-       return ovl_setxattr(dentry, inode, name, value, size, flags);
+       return -EPERM;
 }
 
 static int ovl_own_xattr_set(const struct xattr_handler *handler,
@@ -1021,42 +1052,59 @@ static int ovl_own_xattr_set(const struct xattr_handler *handler,
        return -EPERM;
 }
 
-static const struct xattr_handler ovl_posix_acl_access_xattr_handler = {
+static int ovl_other_xattr_get(const struct xattr_handler *handler,
+                              struct dentry *dentry, struct inode *inode,
+                              const char *name, void *buffer, size_t size)
+{
+       return ovl_xattr_get(dentry, name, buffer, size);
+}
+
+static int ovl_other_xattr_set(const struct xattr_handler *handler,
+                              struct dentry *dentry, struct inode *inode,
+                              const char *name, const void *value,
+                              size_t size, int flags)
+{
+       return ovl_xattr_set(dentry, name, value, size, flags);
+}
+
+static const struct xattr_handler __maybe_unused
+ovl_posix_acl_access_xattr_handler = {
        .name = XATTR_NAME_POSIX_ACL_ACCESS,
        .flags = ACL_TYPE_ACCESS,
+       .get = ovl_posix_acl_xattr_get,
        .set = ovl_posix_acl_xattr_set,
 };
 
-static const struct xattr_handler ovl_posix_acl_default_xattr_handler = {
+static const struct xattr_handler __maybe_unused
+ovl_posix_acl_default_xattr_handler = {
        .name = XATTR_NAME_POSIX_ACL_DEFAULT,
        .flags = ACL_TYPE_DEFAULT,
+       .get = ovl_posix_acl_xattr_get,
        .set = ovl_posix_acl_xattr_set,
 };
 
 static const struct xattr_handler ovl_own_xattr_handler = {
        .prefix = OVL_XATTR_PREFIX,
+       .get = ovl_own_xattr_get,
        .set = ovl_own_xattr_set,
 };
 
 static const struct xattr_handler ovl_other_xattr_handler = {
        .prefix = "", /* catch all */
+       .get = ovl_other_xattr_get,
        .set = ovl_other_xattr_set,
 };
 
 static const struct xattr_handler *ovl_xattr_handlers[] = {
+#ifdef CONFIG_FS_POSIX_ACL
        &ovl_posix_acl_access_xattr_handler,
        &ovl_posix_acl_default_xattr_handler,
+#endif
        &ovl_own_xattr_handler,
        &ovl_other_xattr_handler,
        NULL
 };
 
-static const struct xattr_handler *ovl_xattr_noacl_handlers[] = {
-       &ovl_own_xattr_handler,
-       &ovl_other_xattr_handler,
-       NULL,
-};
-
 static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 {
        struct path upperpath = { NULL, NULL };
@@ -1132,7 +1180,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        err = -EINVAL;
        stacklen = ovl_split_lowerdirs(lowertmp);
        if (stacklen > OVL_MAX_STACK) {
-               pr_err("overlayfs: too many lower directries, limit is %d\n",
+               pr_err("overlayfs: too many lower directories, limit is %d\n",
                       OVL_MAX_STACK);
                goto out_free_lowertmp;
        } else if (!ufs->config.upperdir && stacklen == 1) {
@@ -1269,10 +1317,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_magic = OVERLAYFS_SUPER_MAGIC;
        sb->s_op = &ovl_super_operations;
-       if (IS_ENABLED(CONFIG_FS_POSIX_ACL))
-               sb->s_xattr = ovl_xattr_handlers;
-       else
-               sb->s_xattr = ovl_xattr_noacl_handlers;
+       sb->s_xattr = ovl_xattr_handlers;
        sb->s_root = root_dentry;
        sb->s_fs_info = ufs;
        sb->s_flags |= MS_POSIXACL;
index 54e2702..ac0df4d 100644 (file)
@@ -1556,18 +1556,13 @@ static const struct file_operations proc_pid_set_comm_operations = {
 static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
 {
        struct task_struct *task;
-       struct mm_struct *mm;
        struct file *exe_file;
 
        task = get_proc_task(d_inode(dentry));
        if (!task)
                return -ENOENT;
-       mm = get_task_mm(task);
+       exe_file = get_task_exe_file(task);
        put_task_struct(task);
-       if (!mm)
-               return -ENOENT;
-       exe_file = get_mm_exe_file(mm);
-       mmput(mm);
        if (exe_file) {
                *exe_path = exe_file->f_path;
                path_get(&exe_file->f_path);
index 187d84e..f6fa99e 100644 (file)
@@ -581,6 +581,8 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
                mss->anonymous_thp += HPAGE_PMD_SIZE;
        else if (PageSwapBacked(page))
                mss->shmem_thp += HPAGE_PMD_SIZE;
+       else if (is_zone_device_page(page))
+               /* pass */;
        else
                VM_BUG_ON_PAGE(1, page);
        smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
index f35523d..b803213 100644 (file)
@@ -114,9 +114,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
         * If buf != of->prealloc_buf, we don't know how
         * large it is, so cannot safely pass it to ->show
         */
-       if (pos || WARN_ON_ONCE(buf != of->prealloc_buf))
+       if (WARN_ON_ONCE(buf != of->prealloc_buf))
                return 0;
        len = ops->show(kobj, of->kn->priv, buf);
+       if (pos) {
+               if (len <= pos)
+                       return 0;
+               len -= pos;
+               memmove(buf, buf + pos, len);
+       }
        return min(count, len);
 }
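
The change above keeps handing ->show() the start of the preallocated buffer and
then shifts the rendered text to honour a non-zero read offset. A small user-space
sketch of that offset handling; the attribute text is invented:

#include <stdio.h>
#include <string.h>

/* Render the whole attribute from the start of the buffer, then shift the
 * text down so a read at offset pos sees the right bytes.
 */
static size_t read_attr(char *buf, size_t count, size_t pos)
{
        size_t len = (size_t)snprintf(buf, count, "42 frobs enabled\n");

        if (pos) {
                if (len <= pos)
                        return 0;       /* reading past the end: EOF */
                len -= pos;
                memmove(buf, buf + pos, len);
        }
        return len < count ? len : count;
}

int main(void)
{
        char buf[64];
        size_t n = read_attr(buf, sizeof(buf), 3);

        fwrite(buf, 1, n, stdout);      /* prints the text from offset 3 on */
        return 0;
}
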
 
index 3dd8f1d..05b5243 100644 (file)
@@ -2278,6 +2278,8 @@ xfs_alloc_log_agf(
                offsetof(xfs_agf_t, agf_btreeblks),
                offsetof(xfs_agf_t, agf_uuid),
                offsetof(xfs_agf_t, agf_rmap_blocks),
+               /* needed so that we don't log the whole rest of the structure: */
+               offsetof(xfs_agf_t, agf_spare64),
                sizeof(xfs_agf_t)
        };
 
index b5c213a..0856979 100644 (file)
@@ -1814,6 +1814,10 @@ xfs_btree_lookup(
 
        XFS_BTREE_STATS_INC(cur, lookup);
 
+       /* No such thing as a zero-level tree. */
+       if (cur->bc_nlevels == 0)
+               return -EFSCORRUPTED;
+
        block = NULL;
        keyno = 0;
 
@@ -4554,15 +4558,22 @@ xfs_btree_simple_query_range(
        if (error)
                goto out;
 
+       /* Nothing?  See if there's anything to the right. */
+       if (!stat) {
+               error = xfs_btree_increment(cur, 0, &stat);
+               if (error)
+                       goto out;
+       }
+
        while (stat) {
                /* Find the record. */
                error = xfs_btree_get_rec(cur, &recp, &stat);
                if (error || !stat)
                        break;
-               cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
 
                /* Skip if high_key(rec) < low_key. */
                if (firstrec) {
+                       cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
                        firstrec = false;
                        diff = cur->bc_ops->diff_two_keys(cur, low_key,
                                        &rec_key);
@@ -4571,6 +4582,7 @@ xfs_btree_simple_query_range(
                }
 
                /* Stop if high_key < low_key(rec). */
+               cur->bc_ops->init_key_from_rec(&rec_key, recp);
                diff = cur->bc_ops->diff_two_keys(cur, &rec_key, high_key);
                if (diff > 0)
                        break;
index 054a203..c221d0e 100644 (file)
@@ -194,7 +194,7 @@ xfs_defer_trans_abort(
        /* Abort intent items. */
        list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
                trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
-               if (dfp->dfp_committed)
+               if (!dfp->dfp_done)
                        dfp->dfp_type->abort_intent(dfp->dfp_intent);
        }
 
@@ -290,7 +290,6 @@ xfs_defer_finish(
        struct xfs_defer_pending        *dfp;
        struct list_head                *li;
        struct list_head                *n;
-       void                            *done_item = NULL;
        void                            *state;
        int                             error = 0;
        void                            (*cleanup_fn)(struct xfs_trans *, void *, int);
@@ -309,19 +308,11 @@ xfs_defer_finish(
                if (error)
                        goto out;
 
-               /* Mark all pending intents as committed. */
-               list_for_each_entry_reverse(dfp, &dop->dop_pending, dfp_list) {
-                       if (dfp->dfp_committed)
-                               break;
-                       trace_xfs_defer_pending_commit((*tp)->t_mountp, dfp);
-                       dfp->dfp_committed = true;
-               }
-
                /* Log an intent-done item for the first pending item. */
                dfp = list_first_entry(&dop->dop_pending,
                                struct xfs_defer_pending, dfp_list);
                trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
-               done_item = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
+               dfp->dfp_done = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
                                dfp->dfp_count);
                cleanup_fn = dfp->dfp_type->finish_cleanup;
 
@@ -331,7 +322,7 @@ xfs_defer_finish(
                        list_del(li);
                        dfp->dfp_count--;
                        error = dfp->dfp_type->finish_item(*tp, dop, li,
-                                       done_item, &state);
+                                       dfp->dfp_done, &state);
                        if (error) {
                                /*
                                 * Clean up after ourselves and jump out.
@@ -428,8 +419,8 @@ xfs_defer_add(
                dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
                                KM_SLEEP | KM_NOFS);
                dfp->dfp_type = defer_op_types[type];
-               dfp->dfp_committed = false;
                dfp->dfp_intent = NULL;
+               dfp->dfp_done = NULL;
                dfp->dfp_count = 0;
                INIT_LIST_HEAD(&dfp->dfp_work);
                list_add_tail(&dfp->dfp_list, &dop->dop_intake);
index cc3981c..e96533d 100644 (file)
@@ -30,8 +30,8 @@ struct xfs_defer_op_type;
 struct xfs_defer_pending {
        const struct xfs_defer_op_type  *dfp_type;      /* function pointers */
        struct list_head                dfp_list;       /* pending items */
-       bool                            dfp_committed;  /* committed trans? */
        void                            *dfp_intent;    /* log intent item */
+       void                            *dfp_done;      /* log done item */
        struct list_head                dfp_work;       /* work items */
        unsigned int                    dfp_count;      /* # extent items */
 };
index e6a8bea..270fb5c 100644 (file)
@@ -674,7 +674,8 @@ typedef struct xfs_agf {
 #define        XFS_AGF_BTREEBLKS       0x00000800
 #define        XFS_AGF_UUID            0x00001000
 #define        XFS_AGF_RMAP_BLOCKS     0x00002000
-#define        XFS_AGF_NUM_BITS        14
+#define        XFS_AGF_SPARE64         0x00004000
+#define        XFS_AGF_NUM_BITS        15
 #define        XFS_AGF_ALL_BITS        ((1 << XFS_AGF_NUM_BITS) - 1)
 
 #define XFS_AGF_FLAGS \
@@ -691,7 +692,8 @@ typedef struct xfs_agf {
        { XFS_AGF_LONGEST,      "LONGEST" }, \
        { XFS_AGF_BTREEBLKS,    "BTREEBLKS" }, \
        { XFS_AGF_UUID,         "UUID" }, \
-       { XFS_AGF_RMAP_BLOCKS,  "RMAP_BLOCKS" }
+       { XFS_AGF_RMAP_BLOCKS,  "RMAP_BLOCKS" }, \
+       { XFS_AGF_SPARE64,      "SPARE64" }
 
 /* disk block (xfs_daddr_t) in the AG */
 #define XFS_AGF_DADDR(mp)      ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
index 0e3d4f5..4aecc5f 100644 (file)
@@ -583,7 +583,8 @@ xfs_sb_verify(
         * Only check the in progress field for the primary superblock as
         * mkfs.xfs doesn't clear it from secondary superblocks.
         */
-       return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
+       return xfs_mount_validate_sb(mp, &sb,
+                                    bp->b_maps[0].bm_bn == XFS_SB_DADDR,
                                     check_version);
 }
 
index 607cc29..b5b9bff 100644 (file)
@@ -1611,7 +1611,7 @@ xfs_wait_buftarg(
         */
        while (percpu_counter_sum(&btp->bt_io_count))
                delay(100);
-       drain_workqueue(btp->bt_mount->m_buf_workqueue);
+       flush_workqueue(btp->bt_mount->m_buf_workqueue);
 
        /* loop until there is nothing left on the lru list. */
        while (list_lru_count(&btp->bt_lru)) {
index 24ef83e..fd6be45 100644 (file)
@@ -1574,9 +1574,16 @@ xfs_fs_fill_super(
                }
        }
 
-       if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+       if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+               if (mp->m_sb.sb_rblocks) {
+                       xfs_alert(mp,
+       "EXPERIMENTAL reverse mapping btree not compatible with realtime device!");
+                       error = -EINVAL;
+                       goto out_filestream_unmount;
+               }
                xfs_alert(mp,
        "EXPERIMENTAL reverse mapping btree feature enabled. Use at your own risk!");
+       }
 
        error = xfs_mountfs(mp);
        if (error)
index 7e88bec..d303a66 100644 (file)
@@ -2295,7 +2295,7 @@ DECLARE_EVENT_CLASS(xfs_defer_pending_class,
                __entry->dev = mp ? mp->m_super->s_dev : 0;
                __entry->type = dfp->dfp_type->type;
                __entry->intent = dfp->dfp_intent;
-               __entry->committed = dfp->dfp_committed;
+               __entry->committed = dfp->dfp_done != NULL;
                __entry->nr = dfp->dfp_count;
        ),
        TP_printk("dev %d:%d optype %d intent %p committed %d nr %d\n",
index 1bfa602..5dea1fb 100644 (file)
@@ -72,6 +72,7 @@ struct exception_table_entry
 /* Returns 0 if exception not found and fixup otherwise.  */
 extern unsigned long search_exception_table(unsigned long);
 
+
 /*
  * architectures with an MMU should override these two
  */
index 4d8452c..c5eaf2f 100644 (file)
@@ -1056,7 +1056,7 @@ static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
        return NULL;
 }
 
-#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, validate, data, fn) \
+#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
        static const void * __acpi_table_##name[]                       \
                __attribute__((unused))                                 \
                 = { (void *) table_id,                                 \
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
new file mode 100644 (file)
index 0000000..f6505d8
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_BITFIELD_H
+#define _LINUX_BITFIELD_H
+
+#include <linux/bug.h>
+
+/*
+ * Bitfield access macros
+ *
+ * FIELD_{GET,PREP} macros take a shifted mask as their first parameter,
+ * from which they extract the base mask and the shift amount.
+ * The mask must be a compile-time constant.
+ *
+ * Example:
+ *
+ *  #define REG_FIELD_A  GENMASK(6, 0)
+ *  #define REG_FIELD_B  BIT(7)
+ *  #define REG_FIELD_C  GENMASK(15, 8)
+ *  #define REG_FIELD_D  GENMASK(31, 16)
+ *
+ * Get:
+ *  a = FIELD_GET(REG_FIELD_A, reg);
+ *  b = FIELD_GET(REG_FIELD_B, reg);
+ *
+ * Set:
+ *  reg = FIELD_PREP(REG_FIELD_A, 1) |
+ *       FIELD_PREP(REG_FIELD_B, 0) |
+ *       FIELD_PREP(REG_FIELD_C, c) |
+ *       FIELD_PREP(REG_FIELD_D, 0x40);
+ *
+ * Modify:
+ *  reg &= ~REG_FIELD_C;
+ *  reg |= FIELD_PREP(REG_FIELD_C, c);
+ */
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx)                      \
+       ({                                                              \
+               BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask),          \
+                                _pfx "mask is not constant");          \
+               BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero");        \
+               BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ?           \
+                                ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
+                                _pfx "value too large for the field"); \
+               BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull,         \
+                                _pfx "type of reg too small for mask"); \
+               __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) +                 \
+                                             (1ULL << __bf_shf(_mask))); \
+       })
+
+/**
+ * FIELD_PREP() - prepare a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_val:  value to put in the field
+ *
+ * FIELD_PREP() masks and shifts up the value.  The result should
+ * be combined with other fields of the bitfield using logical OR.
+ */
+#define FIELD_PREP(_mask, _val)                                                \
+       ({                                                              \
+               __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: ");    \
+               ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask);   \
+       })
+
+/**
+ * FIELD_GET() - extract a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_reg:  32-bit value of the entire bitfield
+ *
+ * FIELD_GET() extracts the field specified by @_mask from the
+ * bitfield passed in as @_reg by masking and shifting it down.
+ */
+#define FIELD_GET(_mask, _reg)                                         \
+       ({                                                              \
+               __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: ");       \
+               (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
+       })
+
+#endif
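
A short usage sketch for the new FIELD_PREP()/FIELD_GET() helpers; the register
layout below is hypothetical, only the macros come from this header:

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_CTRL_SPEED         GENMASK(3, 0)   /* hypothetical 4-bit speed field */
#define DEMO_CTRL_ENABLE        BIT(31)         /* hypothetical enable bit */

static u32 demo_build_ctrl(u8 speed)
{
        /* FIELD_PREP() masks the value and shifts it into position. */
        return FIELD_PREP(DEMO_CTRL_SPEED, speed) | DEMO_CTRL_ENABLE;
}

static u8 demo_read_speed(u32 ctrl)
{
        /* FIELD_GET() masks the field out and shifts it back down. */
        return FIELD_GET(DEMO_CTRL_SPEED, ctrl);
}
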
index e51b070..292d6a1 100644 (file)
@@ -13,6 +13,7 @@ enum bug_trap_type {
 struct pt_regs;
 
 #ifdef __CHECKER__
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
 #define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
 #define BUILD_BUG_ON_ZERO(e) (0)
 #define BUILD_BUG_ON_NULL(e) ((void*)0)
@@ -24,6 +25,8 @@ struct pt_regs;
 #else /* __CHECKER__ */
 
 /* Force a compilation error if a constant expression is not a power of 2 */
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n)       \
+       BUILD_BUG_ON(((n) & ((n) - 1)) != 0)
 #define BUILD_BUG_ON_NOT_POWER_OF_2(n)                 \
        BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
 
index 8dbc892..573c5a1 100644 (file)
 #define __compiler_offsetof(a, b)                                      \
        __builtin_offsetof(a, b)
 
-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+#if GCC_VERSION >= 40100
 # define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
 #endif
 
index 8cc719a..2ac6fa5 100644 (file)
@@ -49,8 +49,6 @@ struct fence_cb;
  * @timestamp: Timestamp when the fence was signaled.
  * @status: Optional, only valid if < 0, must be set before calling
  * fence_signal, indicates that the fence has completed with an error.
- * @child_list: list of children fences
- * @active_list: list of active fences
  *
  * the flags member must be manipulated and read using the appropriate
  * atomic ops (bit_*), so taking the spinlock will not be needed most
index 3523bf6..901e25d 100644 (file)
@@ -574,6 +574,7 @@ static inline void mapping_allow_writable(struct address_space *mapping)
 
 struct posix_acl;
 #define ACL_NOT_CACHED ((void *)(-1))
+#define ACL_DONT_CACHE ((void *)(-3))
 
 static inline struct posix_acl *
 uncached_acl_sentinel(struct task_struct *task)
index cfa6cde..76cff18 100644 (file)
@@ -274,8 +274,7 @@ extern void fscrypt_restore_control_page(struct page *);
 extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
                                                unsigned int);
 /* policy.c */
-extern int fscrypt_process_policy(struct inode *,
-                                       const struct fscrypt_policy *);
+extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *);
 extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
 extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
 extern int fscrypt_inherit_context(struct inode *, struct inode *,
@@ -345,7 +344,7 @@ static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
 }
 
 /* policy.c */
-static inline int fscrypt_notsupp_process_policy(struct inode *i,
+static inline int fscrypt_notsupp_process_policy(struct file *f,
                                const struct fscrypt_policy *p)
 {
        return -EOPNOTSUPP;
index 5198f8e..c97eab6 100644 (file)
@@ -62,7 +62,7 @@ void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
                                  const char *name,
                                  struct config_item_type *type)
 {
-#ifdef CONFIG_CONFIGFS_FS
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
        config_group_init_type_name(&t->group, name, type);
 #endif
 }
index 3267df4..3d70ece 100644 (file)
@@ -18,6 +18,11 @@ struct vm_fault;
 #define IOMAP_MAPPED   0x03    /* blocks allocated @blkno */
 #define IOMAP_UNWRITTEN        0x04    /* blocks allocated @blkno in unwritten state */
 
+/*
+ * Flags for iomap mappings:
+ */
+#define IOMAP_F_MERGED 0x01    /* contains multiple blocks/extents */
+
 /*
  * Magic value for blkno:
  */
@@ -27,7 +32,8 @@ struct iomap {
        sector_t                blkno;  /* 1st sector of mapping, 512b units */
        loff_t                  offset; /* file offset of mapping, bytes */
        u64                     length; /* length of mapping, bytes */
-       int                     type;   /* type of mapping */
+       u16                     type;   /* type of mapping */
+       u16                     flags;  /* flags for mapping */
        struct block_device     *bdev;  /* block device for I/O */
 };
 
index 4429d25..5e5b296 100644 (file)
@@ -195,6 +195,7 @@ static inline bool vma_migratable(struct vm_area_struct *vma)
 }
 
 extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+extern void mpol_put_task_policy(struct task_struct *);
 
 #else
 
@@ -297,5 +298,8 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
        return -1; /* no node preference */
 }
 
+static inline void mpol_put_task_policy(struct task_struct *task)
+{
+}
 #endif /* CONFIG_NUMA */
 #endif
diff --git a/include/linux/mfd/da8xx-cfgchip.h b/include/linux/mfd/da8xx-cfgchip.h
new file mode 100644 (file)
index 0000000..304985e
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * TI DaVinci DA8xx CHIPCFGx registers for syscon consumers.
+ *
+ * Copyright (C) 2016 David Lechner <david@lechnology.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MFD_DA8XX_CFGCHIP_H
+#define __LINUX_MFD_DA8XX_CFGCHIP_H
+
+#include <linux/bitops.h>
+
+/* register offset (32-bit registers) */
+#define CFGCHIP(n)                             ((n) * 4)
+
+/* CFGCHIP0 (PLL0/EDMA3_0) register bits */
+#define CFGCHIP0_PLL_MASTER_LOCK               BIT(4)
+#define CFGCHIP0_EDMA30TC1DBS(n)               ((n) << 2)
+#define CFGCHIP0_EDMA30TC1DBS_MASK             CFGCHIP0_EDMA30TC1DBS(0x3)
+#define CFGCHIP0_EDMA30TC1DBS_16               CFGCHIP0_EDMA30TC1DBS(0x0)
+#define CFGCHIP0_EDMA30TC1DBS_32               CFGCHIP0_EDMA30TC1DBS(0x1)
+#define CFGCHIP0_EDMA30TC1DBS_64               CFGCHIP0_EDMA30TC1DBS(0x2)
+#define CFGCHIP0_EDMA30TC0DBS(n)               ((n) << 0)
+#define CFGCHIP0_EDMA30TC0DBS_MASK             CFGCHIP0_EDMA30TC0DBS(0x3)
+#define CFGCHIP0_EDMA30TC0DBS_16               CFGCHIP0_EDMA30TC0DBS(0x0)
+#define CFGCHIP0_EDMA30TC0DBS_32               CFGCHIP0_EDMA30TC0DBS(0x1)
+#define CFGCHIP0_EDMA30TC0DBS_64               CFGCHIP0_EDMA30TC0DBS(0x2)
+
+/* CFGCHIP1 (eCAP/HPI/EDMA3_1/eHRPWM TBCLK/McASP0 AMUTEIN) register bits */
+#define CFGCHIP1_CAP2SRC(n)                    ((n) << 27)
+#define CFGCHIP1_CAP2SRC_MASK                  CFGCHIP1_CAP2SRC(0x1f)
+#define CFGCHIP1_CAP2SRC_ECAP_PIN              CFGCHIP1_CAP2SRC(0x0)
+#define CFGCHIP1_CAP2SRC_MCASP0_TX             CFGCHIP1_CAP2SRC(0x1)
+#define CFGCHIP1_CAP2SRC_MCASP0_RX             CFGCHIP1_CAP2SRC(0x2)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_RX_THRESHOLD  CFGCHIP1_CAP2SRC(0x7)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_RX            CFGCHIP1_CAP2SRC(0x8)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_TX            CFGCHIP1_CAP2SRC(0x9)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_MISC          CFGCHIP1_CAP2SRC(0xa)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_RX_THRESHOLD  CFGCHIP1_CAP2SRC(0xb)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_RX            CFGCHIP1_CAP2SRC(0xc)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_TX            CFGCHIP1_CAP2SRC(0xd)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_MISC          CFGCHIP1_CAP2SRC(0xe)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_RX_THRESHOLD  CFGCHIP1_CAP2SRC(0xf)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_RX            CFGCHIP1_CAP2SRC(0x10)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_TX            CFGCHIP1_CAP2SRC(0x11)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_MISC          CFGCHIP1_CAP2SRC(0x12)
+#define CFGCHIP1_CAP1SRC(n)                    ((n) << 22)
+#define CFGCHIP1_CAP1SRC_MASK                  CFGCHIP1_CAP1SRC(0x1f)
+#define CFGCHIP1_CAP1SRC_ECAP_PIN              CFGCHIP1_CAP1SRC(0x0)
+#define CFGCHIP1_CAP1SRC_MCASP0_TX             CFGCHIP1_CAP1SRC(0x1)
+#define CFGCHIP1_CAP1SRC_MCASP0_RX             CFGCHIP1_CAP1SRC(0x2)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_RX_THRESHOLD  CFGCHIP1_CAP1SRC(0x7)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_RX            CFGCHIP1_CAP1SRC(0x8)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_TX            CFGCHIP1_CAP1SRC(0x9)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_MISC          CFGCHIP1_CAP1SRC(0xa)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_RX_THRESHOLD  CFGCHIP1_CAP1SRC(0xb)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_RX            CFGCHIP1_CAP1SRC(0xc)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_TX            CFGCHIP1_CAP1SRC(0xd)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_MISC          CFGCHIP1_CAP1SRC(0xe)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_RX_THRESHOLD  CFGCHIP1_CAP1SRC(0xf)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_RX            CFGCHIP1_CAP1SRC(0x10)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_TX            CFGCHIP1_CAP1SRC(0x11)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_MISC          CFGCHIP1_CAP1SRC(0x12)
+#define CFGCHIP1_CAP0SRC(n)                    ((n) << 17)
+#define CFGCHIP1_CAP0SRC_MASK                  CFGCHIP1_CAP0SRC(0x1f)
+#define CFGCHIP1_CAP0SRC_ECAP_PIN              CFGCHIP1_CAP0SRC(0x0)
+#define CFGCHIP1_CAP0SRC_MCASP0_TX             CFGCHIP1_CAP0SRC(0x1)
+#define CFGCHIP1_CAP0SRC_MCASP0_RX             CFGCHIP1_CAP0SRC(0x2)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_RX_THRESHOLD  CFGCHIP1_CAP0SRC(0x7)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_RX            CFGCHIP1_CAP0SRC(0x8)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_TX            CFGCHIP1_CAP0SRC(0x9)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_MISC          CFGCHIP1_CAP0SRC(0xa)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_RX_THRESHOLD  CFGCHIP1_CAP0SRC(0xb)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_RX            CFGCHIP1_CAP0SRC(0xc)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_TX            CFGCHIP1_CAP0SRC(0xd)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_MISC          CFGCHIP1_CAP0SRC(0xe)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_RX_THRESHOLD  CFGCHIP1_CAP0SRC(0xf)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_RX            CFGCHIP1_CAP0SRC(0x10)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_TX            CFGCHIP1_CAP0SRC(0x11)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_MISC          CFGCHIP1_CAP0SRC(0x12)
+#define CFGCHIP1_HPIBYTEAD                     BIT(16)
+#define CFGCHIP1_HPIENA                                BIT(15)
+#define CFGCHIP0_EDMA31TC0DBS(n)               ((n) << 13)
+#define CFGCHIP0_EDMA31TC0DBS_MASK             CFGCHIP0_EDMA31TC0DBS(0x3)
+#define CFGCHIP0_EDMA31TC0DBS_16               CFGCHIP0_EDMA31TC0DBS(0x0)
+#define CFGCHIP0_EDMA31TC0DBS_32               CFGCHIP0_EDMA31TC0DBS(0x1)
+#define CFGCHIP0_EDMA31TC0DBS_64               CFGCHIP0_EDMA31TC0DBS(0x2)
+#define CFGCHIP1_TBCLKSYNC                     BIT(12)
+#define CFGCHIP1_AMUTESEL0(n)                  ((n) << 0)
+#define CFGCHIP1_AMUTESEL0_MASK                        CFGCHIP1_AMUTESEL0(0xf)
+#define CFGCHIP1_AMUTESEL0_LOW                 CFGCHIP1_AMUTESEL0(0x0)
+#define CFGCHIP1_AMUTESEL0_BANK_0              CFGCHIP1_AMUTESEL0(0x1)
+#define CFGCHIP1_AMUTESEL0_BANK_1              CFGCHIP1_AMUTESEL0(0x2)
+#define CFGCHIP1_AMUTESEL0_BANK_2              CFGCHIP1_AMUTESEL0(0x3)
+#define CFGCHIP1_AMUTESEL0_BANK_3              CFGCHIP1_AMUTESEL0(0x4)
+#define CFGCHIP1_AMUTESEL0_BANK_4              CFGCHIP1_AMUTESEL0(0x5)
+#define CFGCHIP1_AMUTESEL0_BANK_5              CFGCHIP1_AMUTESEL0(0x6)
+#define CFGCHIP1_AMUTESEL0_BANK_6              CFGCHIP1_AMUTESEL0(0x7)
+#define CFGCHIP1_AMUTESEL0_BANK_7              CFGCHIP1_AMUTESEL0(0x8)
+
+/* CFGCHIP2 (USB PHY) register bits */
+#define CFGCHIP2_PHYCLKGD                      BIT(17)
+#define CFGCHIP2_VBUSSENSE                     BIT(16)
+#define CFGCHIP2_RESET                         BIT(15)
+#define CFGCHIP2_OTGMODE(n)                    ((n) << 13)
+#define CFGCHIP2_OTGMODE_MASK                  CFGCHIP2_OTGMODE(0x3)
+#define CFGCHIP2_OTGMODE_NO_OVERRIDE           CFGCHIP2_OTGMODE(0x0)
+#define CFGCHIP2_OTGMODE_FORCE_HOST            CFGCHIP2_OTGMODE(0x1)
+#define CFGCHIP2_OTGMODE_FORCE_DEVICE          CFGCHIP2_OTGMODE(0x2)
+#define CFGCHIP2_OTGMODE_FORCE_HOST_VBUS_LOW   CFGCHIP2_OTGMODE(0x3)
+#define CFGCHIP2_USB1PHYCLKMUX                 BIT(12)
+#define CFGCHIP2_USB2PHYCLKMUX                 BIT(11)
+#define CFGCHIP2_PHYPWRDN                      BIT(10)
+#define CFGCHIP2_OTGPWRDN                      BIT(9)
+#define CFGCHIP2_DATPOL                                BIT(8)
+#define CFGCHIP2_USB1SUSPENDM                  BIT(7)
+#define CFGCHIP2_PHY_PLLON                     BIT(6)
+#define CFGCHIP2_SESENDEN                      BIT(5)
+#define CFGCHIP2_VBDTCTEN                      BIT(4)
+#define CFGCHIP2_REFFREQ(n)                    ((n) << 0)
+#define CFGCHIP2_REFFREQ_MASK                  CFGCHIP2_REFFREQ(0xf)
+#define CFGCHIP2_REFFREQ_12MHZ                 CFGCHIP2_REFFREQ(0x1)
+#define CFGCHIP2_REFFREQ_24MHZ                 CFGCHIP2_REFFREQ(0x2)
+#define CFGCHIP2_REFFREQ_48MHZ                 CFGCHIP2_REFFREQ(0x3)
+#define CFGCHIP2_REFFREQ_19_2MHZ               CFGCHIP2_REFFREQ(0x4)
+#define CFGCHIP2_REFFREQ_38_4MHZ               CFGCHIP2_REFFREQ(0x5)
+#define CFGCHIP2_REFFREQ_13MHZ                 CFGCHIP2_REFFREQ(0x6)
+#define CFGCHIP2_REFFREQ_26MHZ                 CFGCHIP2_REFFREQ(0x7)
+#define CFGCHIP2_REFFREQ_20MHZ                 CFGCHIP2_REFFREQ(0x8)
+#define CFGCHIP2_REFFREQ_40MHZ                 CFGCHIP2_REFFREQ(0x9)
+
+/* CFGCHIP3 (EMAC/uPP/PLL1/ASYNC3/PRU/DIV4.5/EMIFA) register bits */
+#define CFGCHIP3_RMII_SEL                      BIT(8)
+#define CFGCHIP3_UPP_TX_CLKSRC                 BIT(6)
+#define CFGCHIP3_PLL1_MASTER_LOCK              BIT(5)
+#define CFGCHIP3_ASYNC3_CLKSRC                 BIT(4)
+#define CFGCHIP3_PRUEVTSEL                     BIT(3)
+#define CFGCHIP3_DIV45PENA                     BIT(2)
+#define CFGCHIP3_EMA_CLKSRC                    BIT(1)
+
+/* CFGCHIP4 (McASP0 AMUNTEIN) register bits */
+#define CFGCHIP4_AMUTECLR0                     BIT(0)
+
+#endif /* __LINUX_MFD_DA8XX_CFGCHIP_H */
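
The new header only provides register offsets and bit definitions; drivers are expected to reach the CFGCHIP block through a syscon regmap. A hedged consumer sketch (the compatible string and the chosen bits are illustrative assumptions; only the macros come from the header):

/*
 * Illustrative CFGCHIP consumer: select the 24 MHz USB PHY reference
 * clock and clear the PHY power-down bit.  "ti,da830-cfgchip" is an
 * assumed compatible string for the syscon node.
 */
#include <linux/err.h>
#include <linux/mfd/da8xx-cfgchip.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int demo_da8xx_usb20_phy_setup(void)
{
	struct regmap *cfgchip;

	cfgchip = syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
	if (IS_ERR(cfgchip))
		return PTR_ERR(cfgchip);

	return regmap_update_bits(cfgchip, CFGCHIP(2),
				  CFGCHIP2_REFFREQ_MASK | CFGCHIP2_PHYPWRDN,
				  CFGCHIP2_REFFREQ_24MHZ);
}
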
index 2567a87..7f55b8b 100644 (file)
 /*
  * time in us for processing a single channel, calculated as follows:
  *
- * num cycles = open delay + (sample delay + conv time) * averaging
+ * max num cycles = open delay + (sample delay + conv time) * averaging
  *
- * num cycles: 152 + (1 + 13) * 16 = 376
+ * max num cycles: 262143 + (255 + 13) * 16 = 266431
  *
  * clock frequency: 26MHz / 8 = 3.25MHz
  * clock period: 1 / 3.25MHz = 308ns
  *
- * processing time: 376 * 308ns = 116us
+ * max processing time: 266431 * 308ns = 83ms(approx)
  */
-#define IDLE_TIMEOUT 116 /* microsec */
+#define IDLE_TIMEOUT 83 /* milliseconds */
 
 #define TSCADC_CELLS           2
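
The revised timeout is derived from worst-case register values rather than the previous defaults. A quick user-space check of the arithmetic in the comment above (262143 open-delay cycles, 255 sample-delay cycles, 13 conversion cycles, 16x averaging, 26 MHz / 8 clock); the exact figure lands just over 82 ms, which the driver rounds up to its 83 ms timeout:

/* Recompute the worst-case sampling time quoted in the comment above. */
#include <stdio.h>

int main(void)
{
	unsigned long max_cycles = 262143UL + (255 + 13) * 16;	/* 266431 */
	double clock_hz = 26e6 / 8;				/* 3.25 MHz */
	double period_ns = 1e9 / clock_hz;			/* ~308 ns */
	double max_us = max_cycles * period_ns / 1000.0;

	printf("max cycles: %lu\n", max_cycles);
	printf("max time:   %.1f us (~%.1f ms)\n", max_us, max_us / 1000.0);
	return 0;
}
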
 
index 73a720f..6045d4d 100644 (file)
@@ -6837,9 +6837,10 @@ struct mlx5_ifc_pude_reg_bits {
 };
 
 struct mlx5_ifc_ptys_reg_bits {
-       u8         an_disable_cap[0x1];
+       u8         reserved_at_0[0x1];
        u8         an_disable_admin[0x1];
-       u8         reserved_at_2[0x6];
+       u8         an_disable_cap[0x1];
+       u8         reserved_at_3[0x5];
        u8         local_port[0x8];
        u8         reserved_at_10[0xd];
        u8         proto_mask[0x3];
index 08ed53e..ef815b9 100644 (file)
@@ -2014,6 +2014,7 @@ extern void mm_drop_all_locks(struct mm_struct *mm);
 
 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
 extern struct file *get_mm_exe_file(struct mm_struct *mm);
+extern struct file *get_task_exe_file(struct task_struct *task);
 
 extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
 extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
index 0d126ae..d43ef96 100644 (file)
@@ -32,6 +32,7 @@
 #define SDIO_DEVICE_ID_BROADCOM_43340          0xa94c
 #define SDIO_DEVICE_ID_BROADCOM_43341          0xa94d
 #define SDIO_DEVICE_ID_BROADCOM_4335_4339      0x4335
+#define SDIO_DEVICE_ID_BROADCOM_4339           0x4339
 #define SDIO_DEVICE_ID_BROADCOM_43362          0xa962
 #define SDIO_DEVICE_ID_BROADCOM_43430          0xa9a6
 #define SDIO_DEVICE_ID_BROADCOM_4345           0x4345
index d572b78..7f2ae99 100644 (file)
@@ -828,9 +828,21 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
  */
 #define zone_idx(zone)         ((zone) - (zone)->zone_pgdat->node_zones)
 
-static inline int populated_zone(struct zone *zone)
+/*
+ * Returns true if a zone has pages managed by the buddy allocator.
+ * All the reclaim decisions have to use this function rather than
+ * populated_zone(). If the whole zone is reserved then we can easily
+ * end up with populated_zone() && !managed_zone().
+ */
+static inline bool managed_zone(struct zone *zone)
+{
+       return zone->managed_pages;
+}
+
+/* Returns true if a zone has memory */
+static inline bool populated_zone(struct zone *zone)
 {
-       return (!!zone->present_pages);
+       return zone->present_pages;
 }
 
 extern int movable_zone;
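
managed_zone() keys off managed_pages (pages actually handed to the buddy allocator) instead of present_pages, so a zone that exists but is entirely reserved no longer looks like a reclaim target. A simplified sketch of the loop shape the reclaim-side hunks later in this diff convert to:

/*
 * Condensed from the vmscan/page_alloc conversions below: skip zones
 * with no buddy-managed pages even if they have present pages.
 */
#include <linux/mmzone.h>

static unsigned long demo_total_high_wmark(pg_data_t *pgdat)
{
	unsigned long total_high_wmark = 0;
	int z;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = &pgdat->node_zones[z];

		if (!managed_zone(zone))	/* was: populated_zone(zone) */
			continue;

		total_high_wmark += high_wmark_pages(zone);
	}
	return total_high_wmark;
}
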
index 67bb978..2095b6a 100644 (file)
@@ -3266,6 +3266,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
        napi->skb = NULL;
 }
 
+bool netdev_is_rx_handler_busy(struct net_device *dev);
 int netdev_rx_handler_register(struct net_device *dev,
                               rx_handler_func_t *rx_handler,
                               void *rx_handler_data);
index d8b37ba..7676557 100644 (file)
@@ -794,7 +794,7 @@ struct nvmf_connect_command {
 };
 
 struct nvmf_connect_data {
-       uuid_le         hostid;
+       uuid_be         hostid;
        __le16          cntlid;
        char            resv4[238];
        char            subsysnqn[NVMF_NQN_FIELD_LEN];
index fbc1fa6..0ab8359 100644 (file)
@@ -682,15 +682,6 @@ struct pci_driver {
 
 #define        to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
 
-/**
- * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
- * @_table: device table name
- *
- * This macro is deprecated and should not be used in new code.
- */
-#define DEFINE_PCI_DEVICE_TABLE(_table) \
-       const struct pci_device_id _table[]
-
 /**
  * PCI_DEVICE - macro used to describe a specific pci device
  * @vend: the 16 bit PCI Vendor ID
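
With DEFINE_PCI_DEVICE_TABLE gone, drivers simply declare the ID table with its real type. A sketch of the preferred open-coded form (the vendor and device IDs below are placeholders, not real hardware):

/* The open-coded form that replaces the removed macro. */
#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_pci_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* placeholder IDs */
	{ PCI_DEVICE(0x1234, 0x9abc) },
	{ }					/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, demo_pci_ids);
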
index 923266c..48ec765 100644 (file)
@@ -111,7 +111,6 @@ struct uart_8250_port {
                                                 *   if no_console_suspend
                                                 */
        unsigned char           probe;
-       struct mctrl_gpios      *gpios;
 #define UART_PROBE_RSA (1 << 0)
 
        /*
index cbd8990..2b5b10e 100644 (file)
@@ -118,10 +118,11 @@ static inline int arch_within_stack_frames(const void * const stack,
 extern void __check_object_size(const void *ptr, unsigned long n,
                                        bool to_user);
 
-static inline void check_object_size(const void *ptr, unsigned long n,
-                                    bool to_user)
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+                                             bool to_user)
 {
-       __check_object_size(ptr, n, to_user);
+       if (!__builtin_constant_p(n))
+               __check_object_size(ptr, n, to_user);
 }
 #else
 static inline void check_object_size(const void *ptr, unsigned long n,
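
The added __builtin_constant_p() test makes check_object_size() compile away when the copy length is a compile-time constant, presumably because constant sizes are already covered by the existing compile-time object-size checks, leaving only runtime-sized copies for the runtime walk. A small user-space illustration of the gate itself (not kernel code):

/*
 * Prints whether each length expression is a compile-time constant as
 * far as the compiler is concerned.  Build with gcc or clang at -O2,
 * since __builtin_constant_p() is evaluated during optimization.
 */
#include <stdio.h>

static void report(const char *what, int is_const)
{
	printf("%-22s constant: %s\n", what, is_const ? "yes" : "no");
}

int main(int argc, char **argv)
{
	size_t fixed = 128;
	size_t runtime = (size_t)argc * 64;

	(void)argv;
	report("sizeof(long)", __builtin_constant_p(sizeof(long)));
	report("local 'fixed'", __builtin_constant_p(fixed));
	report("argc-derived", __builtin_constant_p(runtime));
	return 0;
}
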
index 9b4c418..fd60ecc 100644 (file)
@@ -52,7 +52,7 @@ struct unix_sock {
        struct sock             sk;
        struct unix_address     *addr;
        struct path             path;
-       struct mutex            readlock;
+       struct mutex            iolock, bindlock;
        struct sock             *peer;
        struct list_head        link;
        atomic_long_t           inflight;
index 533cb64..bd26cc6 100644 (file)
@@ -1102,6 +1102,7 @@ struct station_info {
        struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1];
 };
 
+#if IS_ENABLED(CONFIG_CFG80211)
 /**
  * cfg80211_get_station - retrieve information about a given station
  * @dev: the device where the station is supposed to be connected to
@@ -1114,6 +1115,14 @@ struct station_info {
  */
 int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
                         struct station_info *sinfo);
+#else
+static inline int cfg80211_get_station(struct net_device *dev,
+                                      const u8 *mac_addr,
+                                      struct station_info *sinfo)
+{
+       return -ENOENT;
+}
+#endif
 
 /**
  * enum monitor_flags - monitor flags
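
The new stub lets code outside the wireless stack call cfg80211_get_station() unconditionally and simply see -ENOENT when cfg80211 is not built. A hedged caller sketch (the use of expected_throughput is an illustrative choice, not taken from this diff):

/*
 * Illustrative caller: query a peer's expected throughput, falling back
 * to 0 when the station is unknown or cfg80211 is compiled out.
 */
#include <linux/string.h>
#include <net/cfg80211.h>

static u32 demo_peer_throughput_kbps(struct net_device *dev, const u8 *peer)
{
	struct station_info sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	if (cfg80211_get_station(dev, peer, &sinfo))
		return 0;

	return sinfo.expected_throughput;
}

A real caller would additionally check the corresponding bit in sinfo.filled before trusting the value.
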
index 9d97c52..7556646 100644 (file)
@@ -26,6 +26,7 @@ enum dsa_tag_protocol {
        DSA_TAG_PROTO_TRAILER,
        DSA_TAG_PROTO_EDSA,
        DSA_TAG_PROTO_BRCM,
+       DSA_TAG_PROTO_QCA,
        DSA_TAG_LAST,           /* MUST BE LAST */
 };
 
index 43a5a0e..20ed969 100644 (file)
@@ -23,6 +23,7 @@ struct __ip6_tnl_parm {
        __u8 proto;             /* tunnel protocol */
        __u8 encap_limit;       /* encapsulation limit for tunnel */
        __u8 hop_limit;         /* hop limit for tunnel */
+       bool collect_md;
        __be32 flowinfo;        /* traffic class and flowlabel for tunnel */
        __u32 flags;            /* tunnel flags */
        struct in6_addr laddr;  /* local tunnel end-point address */
index 4079fc1..7d4a72e 100644 (file)
@@ -111,6 +111,7 @@ struct fib_info {
        unsigned char           fib_scope;
        unsigned char           fib_type;
        __be32                  fib_prefsrc;
+       u32                     fib_tb_id;
        u32                     fib_priority;
        u32                     *fib_metrics;
 #define fib_mtu fib_metrics[RTAX_MTU-1]
@@ -319,7 +320,7 @@ void fib_flush_external(struct net *net);
 /* Exported by fib_semantics.c */
 int ip_fib_check_default(__be32 gw, struct net_device *dev);
 int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
-int fib_sync_down_addr(struct net *net, __be32 local);
+int fib_sync_down_addr(struct net_device *dev, __be32 local);
 int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
 
 extern u32 fib_multipath_secret __read_mostly;
index e598c63..59557c0 100644 (file)
@@ -255,6 +255,8 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
 
 void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                    const struct iphdr *tnl_params, const u8 protocol);
+void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+                      const u8 proto);
 int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
 int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
 int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
index 3832099..b220dab 100644 (file)
@@ -114,25 +114,6 @@ static inline u32 l3mdev_fib_table(const struct net_device *dev)
        return tb_id;
 }
 
-static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
-{
-       struct net_device *dev;
-       bool rc = false;
-
-       if (ifindex == 0)
-               return false;
-
-       rcu_read_lock();
-
-       dev = dev_get_by_index_rcu(net, ifindex);
-       if (dev)
-               rc = netif_is_l3_master(dev);
-
-       rcu_read_unlock();
-
-       return rc;
-}
-
 struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6);
 
 static inline
@@ -226,11 +207,6 @@ static inline u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
        return 0;
 }
 
-static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
-{
-       return false;
-}
-
 static inline
 struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6)
 {
index d27588c..1139cde 100644 (file)
@@ -36,4 +36,8 @@ void nft_meta_set_eval(const struct nft_expr *expr,
 void nft_meta_set_destroy(const struct nft_ctx *ctx,
                          const struct nft_expr *expr);
 
+int nft_meta_set_validate(const struct nft_ctx *ctx,
+                         const struct nft_expr *expr,
+                         const struct nft_data **data);
+
 #endif
index 60fa153..02e28c5 100644 (file)
@@ -8,6 +8,10 @@ struct nft_reject {
 
 extern const struct nla_policy nft_reject_policy[];
 
+int nft_reject_validate(const struct nft_ctx *ctx,
+                       const struct nft_expr *expr,
+                       const struct nft_data **data);
+
 int nft_reject_init(const struct nft_ctx *ctx,
                    const struct nft_expr *expr,
                    const struct nlattr * const tb[]);
index ce93c4b..8693dc4 100644 (file)
@@ -537,6 +537,7 @@ struct sctp_datamsg {
 struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
                                            struct sctp_sndrcvinfo *,
                                            struct iov_iter *);
+void sctp_datamsg_free(struct sctp_datamsg *);
 void sctp_datamsg_put(struct sctp_datamsg *);
 void sctp_chunk_fail(struct sctp_chunk *, int error);
 int sctp_chunk_abandoned(struct sctp_chunk *);
@@ -1076,7 +1077,7 @@ struct sctp_outq {
 void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
 void sctp_outq_teardown(struct sctp_outq *);
 void sctp_outq_free(struct sctp_outq*);
-int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
+void sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
 int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *);
 int sctp_outq_is_empty(const struct sctp_outq *);
 void sctp_outq_restart(struct sctp_outq *);
@@ -1084,7 +1085,7 @@ void sctp_outq_restart(struct sctp_outq *);
 void sctp_retransmit(struct sctp_outq *, struct sctp_transport *,
                     sctp_retransmit_reason_t);
 void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
-int sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
+void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
 void sctp_prsctp_prune(struct sctp_association *asoc,
                       struct sctp_sndrcvinfo *sinfo, int msg_len);
 /* Uncork and flush an outqueue.  */
diff --git a/include/net/tc_act/tc_skbmod.h b/include/net/tc_act/tc_skbmod.h
new file mode 100644 (file)
index 0000000..644a211
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, Jamal Hadi Salim
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+*/
+
+#ifndef __NET_TC_SKBMOD_H
+#define __NET_TC_SKBMOD_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_skbmod.h>
+
+struct tcf_skbmod_params {
+       struct rcu_head rcu;
+       u64     flags; /*up to 64 types of operations; extend if needed */
+       u8      eth_dst[ETH_ALEN];
+       u16     eth_type;
+       u8      eth_src[ETH_ALEN];
+};
+
+struct tcf_skbmod {
+       struct tc_action        common;
+       struct tcf_skbmod_params __rcu *skbmod_p;
+};
+#define to_skbmod(a) ((struct tcf_skbmod *)a)
+
+#endif /* __NET_TC_SKBMOD_H */
index 13c0b2b..73d8709 100644 (file)
@@ -11,12 +11,12 @@ struct sas_rphy;
 struct request;
 
 #if !IS_ENABLED(CONFIG_SCSI_SAS_ATTRS)
-static inline int is_sas_attached(struct scsi_device *sdev)
+static inline int scsi_is_sas_rphy(const struct device *sdev)
 {
        return 0;
 }
 #else
-extern int is_sas_attached(struct scsi_device *sdev);
+extern int scsi_is_sas_rphy(const struct device *);
 #endif
 
 static inline int sas_protocol_ata(enum sas_protocol proto)
@@ -202,7 +202,6 @@ extern int sas_rphy_add(struct sas_rphy *);
 extern void sas_rphy_remove(struct sas_rphy *);
 extern void sas_rphy_delete(struct sas_rphy *);
 extern void sas_rphy_unlink(struct sas_rphy *);
-extern int scsi_is_sas_rphy(const struct device *);
 
 struct sas_port *sas_port_alloc(struct device *, int);
 struct sas_port *sas_port_alloc_num(struct device *);
index 9865c8c..18d5dc1 100644 (file)
@@ -73,6 +73,7 @@ enum {
        IFLA_IPTUN_ENCAP_FLAGS,
        IFLA_IPTUN_ENCAP_SPORT,
        IFLA_IPTUN_ENCAP_DPORT,
+       IFLA_IPTUN_COLLECT_METADATA,
        __IFLA_IPTUN_MAX,
 };
 #define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1)
index f9c287c..8915b61 100644 (file)
@@ -428,9 +428,9 @@ enum {
        TCA_FLOWER_KEY_UDP_DST,         /* be16 */
 
        TCA_FLOWER_FLAGS,
-       TCA_FLOWER_KEY_VLAN_ID,
-       TCA_FLOWER_KEY_VLAN_PRIO,
-       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+       TCA_FLOWER_KEY_VLAN_ID,         /* be16 */
+       TCA_FLOWER_KEY_VLAN_PRIO,       /* u8   */
+       TCA_FLOWER_KEY_VLAN_ETH_TYPE,   /* be16 */
 
        TCA_FLOWER_KEY_ENC_KEY_ID,      /* be32 */
        TCA_FLOWER_KEY_ENC_IPV4_SRC,    /* be32 */
@@ -442,6 +442,10 @@ enum {
        TCA_FLOWER_KEY_ENC_IPV6_DST,    /* struct in6_addr */
        TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,/* struct in6_addr */
 
+       TCA_FLOWER_KEY_TCP_SRC_MASK,    /* be16 */
+       TCA_FLOWER_KEY_TCP_DST_MASK,    /* be16 */
+       TCA_FLOWER_KEY_UDP_SRC_MASK,    /* be16 */
+       TCA_FLOWER_KEY_UDP_DST_MASK,    /* be16 */
        __TCA_FLOWER_MAX,
 };
 
diff --git a/include/uapi/linux/tc_act/tc_skbmod.h b/include/uapi/linux/tc_act/tc_skbmod.h
new file mode 100644 (file)
index 0000000..10fc07d
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, Jamal Hadi Salim
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+*/
+
+#ifndef __LINUX_TC_SKBMOD_H
+#define __LINUX_TC_SKBMOD_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_SKBMOD 15
+
+#define SKBMOD_F_DMAC  0x1
+#define SKBMOD_F_SMAC  0x2
+#define SKBMOD_F_ETYPE 0x4
+#define SKBMOD_F_SWAPMAC 0x8
+
+struct tc_skbmod {
+       tc_gen;
+       __u64 flags;
+};
+
+enum {
+       TCA_SKBMOD_UNSPEC,
+       TCA_SKBMOD_TM,
+       TCA_SKBMOD_PARMS,
+       TCA_SKBMOD_DMAC,
+       TCA_SKBMOD_SMAC,
+       TCA_SKBMOD_ETYPE,
+       TCA_SKBMOD_PAD,
+       __TCA_SKBMOD_MAX
+};
+#define TCA_SKBMOD_MAX (__TCA_SKBMOD_MAX - 1)
+
+#endif
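
The SKBMOD_F_* bits describe which Ethernet header fields the action rewrites. A hedged sketch of the intended data-path interpretation (simplified; the real tcf_skbmod_run() in this series also handles stats, RCU and header writability, and may differ in details):

/*
 * Illustration only: apply the flag-selected edits to an Ethernet header.
 * eth_type in the parameters is assumed to be stored in network byte order.
 */
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/tc_act/tc_skbmod.h>
#include <net/tc_act/tc_skbmod.h>

static void demo_skbmod_rewrite(struct sk_buff *skb,
				const struct tcf_skbmod_params *p)
{
	struct ethhdr *eth = eth_hdr(skb);
	u8 tmp[ETH_ALEN];

	if (p->flags & SKBMOD_F_DMAC)
		ether_addr_copy(eth->h_dest, p->eth_dst);
	if (p->flags & SKBMOD_F_SMAC)
		ether_addr_copy(eth->h_source, p->eth_src);
	if (p->flags & SKBMOD_F_ETYPE)
		eth->h_proto = p->eth_type;
	if (p->flags & SKBMOD_F_SWAPMAC) {
		ether_addr_copy(tmp, eth->h_dest);
		ether_addr_copy(eth->h_dest, eth->h_source);
		ether_addr_copy(eth->h_source, tmp);
	}
}
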
index d6709eb..0d302a8 100644 (file)
@@ -19,6 +19,7 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <linux/file.h>
 #include <linux/kernel.h>
 #include <linux/audit.h>
 #include <linux/kthread.h>
@@ -544,10 +545,11 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark)
        unsigned long ino;
        dev_t dev;
 
-       rcu_read_lock();
-       exe_file = rcu_dereference(tsk->mm->exe_file);
+       exe_file = get_task_exe_file(tsk);
+       if (!exe_file)
+               return 0;
        ino = exe_file->f_inode->i_ino;
        dev = exe_file->f_inode->i_sb->s_dev;
-       rcu_read_unlock();
+       fput(exe_file);
        return audit_mark_compare(mark, ino, dev);
 }
index c2de56a..7fa0c4a 100644 (file)
@@ -1,4 +1,12 @@
+# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
 CONFIG_KERNEL_XZ=y
+# CONFIG_KERNEL_LZO is not set
+# CONFIG_KERNEL_LZ4 is not set
 CONFIG_OPTIMIZE_INLINING=y
+# CONFIG_SLAB is not set
+# CONFIG_SLUB is not set
 CONFIG_SLOB=y
index c7fd277..c27e533 100644 (file)
@@ -2069,6 +2069,20 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
        mutex_unlock(&cpuset_mutex);
 }
 
+/*
+ * Make sure the new task conform to the current state of its parent,
+ * which could have been changed by cpuset just after it inherits the
+ * state from the parent and before it sits on the cgroup's task list.
+ */
+void cpuset_fork(struct task_struct *task)
+{
+       if (task_css_is_root(task, cpuset_cgrp_id))
+               return;
+
+       set_cpus_allowed_ptr(task, &current->cpus_allowed);
+       task->mems_allowed = current->mems_allowed;
+}
+
 struct cgroup_subsys cpuset_cgrp_subsys = {
        .css_alloc      = cpuset_css_alloc,
        .css_online     = cpuset_css_online,
@@ -2079,6 +2093,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
        .attach         = cpuset_attach,
        .post_attach    = cpuset_post_attach,
        .bind           = cpuset_bind,
+       .fork           = cpuset_fork,
        .legacy_cftypes = files,
        .early_init     = true,
 };
index 2f974ae..091a78b 100644 (file)
@@ -848,12 +848,7 @@ void do_exit(long code)
        TASKS_RCU(preempt_enable());
        exit_notify(tsk, group_dead);
        proc_exit_connector(tsk);
-#ifdef CONFIG_NUMA
-       task_lock(tsk);
-       mpol_put(tsk->mempolicy);
-       tsk->mempolicy = NULL;
-       task_unlock(tsk);
-#endif
+       mpol_put_task_policy(tsk);
 #ifdef CONFIG_FUTEX
        if (unlikely(current->pi_state_cache))
                kfree(current->pi_state_cache);
index 52e725d..beb3172 100644 (file)
@@ -798,6 +798,29 @@ struct file *get_mm_exe_file(struct mm_struct *mm)
 }
 EXPORT_SYMBOL(get_mm_exe_file);
 
+/**
+ * get_task_exe_file - acquire a reference to the task's executable file
+ *
+ * Returns %NULL if task's mm (if any) has no associated executable file or
+ * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
+ * User must release file via fput().
+ */
+struct file *get_task_exe_file(struct task_struct *task)
+{
+       struct file *exe_file = NULL;
+       struct mm_struct *mm;
+
+       task_lock(task);
+       mm = task->mm;
+       if (mm) {
+               if (!(task->flags & PF_KTHREAD))
+                       exe_file = get_mm_exe_file(mm);
+       }
+       task_unlock(task);
+       return exe_file;
+}
+EXPORT_SYMBOL(get_task_exe_file);
+
 /**
  * get_task_mm - acquire a reference to the task's mm
  *
@@ -913,14 +936,12 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
        deactivate_mm(tsk, mm);
 
        /*
-        * If we're exiting normally, clear a user-space tid field if
-        * requested.  We leave this alone when dying by signal, to leave
-        * the value intact in a core dump, and to save the unnecessary
-        * trouble, say, a killed vfork parent shouldn't touch this mm.
-        * Userland only wants this done for a sys_exit.
+        * Signal userspace if we're not exiting with a core dump
+        * because we want to leave the value intact for debugging
+        * purposes.
         */
        if (tsk->clear_child_tid) {
-               if (!(tsk->flags & PF_SIGNALED) &&
+               if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
                    atomic_read(&mm->mm_users) > 1) {
                        /*
                         * We don't check the error code - if userspace has
@@ -1404,7 +1425,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        p->real_start_time = ktime_get_boot_ns();
        p->io_context = NULL;
        p->audit_context = NULL;
-       threadgroup_change_begin(current);
        cgroup_fork(p);
 #ifdef CONFIG_NUMA
        p->mempolicy = mpol_dup(p->mempolicy);
@@ -1556,6 +1576,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        INIT_LIST_HEAD(&p->thread_group);
        p->task_works = NULL;
 
+       threadgroup_change_begin(current);
        /*
         * Ensure that the cgroup subsystem policies allow the new process to be
         * forked. It should be noted that the new process's css_set can be changed
@@ -1656,6 +1677,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 bad_fork_cancel_cgroup:
        cgroup_cancel_fork(p);
 bad_fork_free_pid:
+       threadgroup_change_end(current);
        if (pid != &init_struct_pid)
                free_pid(pid);
 bad_fork_cleanup_thread:
@@ -1688,7 +1710,6 @@ bad_fork_cleanup_policy:
        mpol_put(p->mempolicy);
 bad_fork_cleanup_threadgroup_lock:
 #endif
-       threadgroup_change_end(current);
        delayacct_tsk_free(p);
 bad_fork_cleanup_count:
        atomic_dec(&p->cred->user->processes);
index 503bc2d..037c321 100644 (file)
@@ -887,7 +887,10 @@ int kexec_load_purgatory(struct kimage *image, unsigned long min,
        return 0;
 out:
        vfree(pi->sechdrs);
+       pi->sechdrs = NULL;
+
        vfree(pi->purgatory_buf);
+       pi->purgatory_buf = NULL;
        return ret;
 }
 
index 251d16b..b501e39 100644 (file)
@@ -247,6 +247,7 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(resource_size(res), SECTION_SIZE);
        arch_remove_memory(align_start, align_size);
+       untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
        pgmap_radix_release(res);
        dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
                        "%s: failed to free all reserved pages\n", __func__);
@@ -282,6 +283,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
                struct percpu_ref *ref, struct vmem_altmap *altmap)
 {
        resource_size_t key, align_start, align_size, align_end;
+       pgprot_t pgprot = PAGE_KERNEL;
        struct dev_pagemap *pgmap;
        struct page_map *page_map;
        int error, nid, is_ram;
@@ -351,6 +353,11 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
        if (nid < 0)
                nid = numa_mem_id();
 
+       error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
+                       align_size);
+       if (error)
+               goto err_pfn_remap;
+
        error = arch_add_memory(nid, align_start, align_size, true);
        if (error)
                goto err_add_memory;
@@ -371,6 +378,8 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
        return __va(res->start);
 
  err_add_memory:
+       untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
+ err_pfn_remap:
  err_radix:
        pgmap_radix_release(res);
        devres_free(page_map);
index 97b0df7..168ff44 100644 (file)
@@ -482,7 +482,16 @@ void pm_qos_update_request(struct pm_qos_request *req,
                return;
        }
 
-       cancel_delayed_work_sync(&req->work);
+       /*
+        * This function may be called very early during boot, for example,
+        * from of_clk_init(), where irq needs to stay disabled.
+        * cancel_delayed_work_sync() assumes that irq is enabled on
+        * invocation and re-enables it on return.  Avoid calling it until
+        * workqueue is initialized.
+        */
+       if (keventd_up())
+               cancel_delayed_work_sync(&req->work);
+
        __pm_qos_update_request(req, new_value);
 }
 EXPORT_SYMBOL_GPL(pm_qos_update_request);
index b69eb8a..16bab47 100644 (file)
@@ -99,26 +99,32 @@ again:
        return add;
 }
 
-/*
- * printk one line from the temporary buffer from @start index until
- * and including the @end index.
- */
-static void print_nmi_seq_line(struct nmi_seq_buf *s, int start, int end)
+static void printk_nmi_flush_line(const char *text, int len)
 {
-       const char *buf = s->buffer + start;
-
        /*
         * The buffers are flushed in NMI only on panic.  The messages must
         * go only into the ring buffer at this stage.  Consoles will get
         * explicitly called later when a crashdump is not generated.
         */
        if (in_nmi())
-               printk_deferred("%.*s", (end - start) + 1, buf);
+               printk_deferred("%.*s", len, text);
        else
-               printk("%.*s", (end - start) + 1, buf);
+               printk("%.*s", len, text);
 
 }
 
+/*
+ * printk one line from the temporary buffer from @start index until
+ * and including the @end index.
+ */
+static void printk_nmi_flush_seq_line(struct nmi_seq_buf *s,
+                                       int start, int end)
+{
+       const char *buf = s->buffer + start;
+
+       printk_nmi_flush_line(buf, (end - start) + 1);
+}
+
 /*
  * Flush data from the associated per_CPU buffer. The function
  * can be called either via IRQ work or independently.
@@ -150,9 +156,11 @@ more:
         * the buffer an unexpected way. If we printed something then
         * @len must only increase.
         */
-       if (i && i >= len)
-               pr_err("printk_nmi_flush: internal error: i=%d >= len=%zu\n",
-                      i, len);
+       if (i && i >= len) {
+               const char *msg = "printk_nmi_flush: internal error\n";
+
+               printk_nmi_flush_line(msg, strlen(msg));
+       }
 
        if (!len)
                goto out; /* Someone else has already flushed the buffer. */
@@ -166,14 +174,14 @@ more:
        /* Print line by line. */
        for (; i < size; i++) {
                if (s->buffer[i] == '\n') {
-                       print_nmi_seq_line(s, last_i, i);
+                       printk_nmi_flush_seq_line(s, last_i, i);
                        last_i = i + 1;
                }
        }
        /* Check if there was a partial line. */
        if (last_i < size) {
-               print_nmi_seq_line(s, last_i, size - 1);
-               pr_cont("\n");
+               printk_nmi_flush_seq_line(s, last_i, size - 1);
+               printk_nmi_flush_line("\n", strlen("\n"));
        }
 
        /*
index ef6c6c3..0db7c8a 100644 (file)
@@ -605,12 +605,16 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
                ptrace_event(PTRACE_EVENT_SECCOMP, data);
                /*
                 * The delivery of a fatal signal during event
-                * notification may silently skip tracer notification.
-                * Terminating the task now avoids executing a system
-                * call that may not be intended.
+                * notification may silently skip tracer notification,
+                * which could leave us with a potentially unmodified
+                * syscall that the tracer would have liked to have
+                * changed. Since the process is about to die, we just
+                * force the syscall to be skipped and let the signal
+                * kill the process and correctly handle any tracer exit
+                * notifications.
                 */
                if (fatal_signal_pending(current))
-                       do_exit(SIGSYS);
+                       goto skip;
                /* Check if the tracer forced the syscall to be skipped. */
                this_syscall = syscall_get_nr(current, task_pt_regs(current));
                if (this_syscall < 0)
index 204fdc8..2ec7c00 100644 (file)
@@ -908,10 +908,11 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
        ktime_t now, expires;
        int cpu = smp_processor_id();
 
+       now = tick_nohz_start_idle(ts);
+
        if (can_stop_idle_tick(cpu, ts)) {
                int was_stopped = ts->tick_stopped;
 
-               now = tick_nohz_start_idle(ts);
                ts->idle_calls++;
 
                expires = tick_nohz_stop_sched_tick(ts, now, cpu);
index 2307d7c..2e2cca5 100644 (file)
@@ -1686,24 +1686,6 @@ config LATENCYTOP
          Enable this option if you want to use the LatencyTOP tool
          to find out which userspace is blocking on what kernel operations.
 
-config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
-       bool
-
-config DEBUG_STRICT_USER_COPY_CHECKS
-       bool "Strict user copy size checks"
-       depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
-       depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
-       help
-         Enabling this option turns a certain set of sanity checks for user
-         copy operations into compile time failures.
-
-         The copy_from_user() etc checks are there to help test if there
-         are sufficient security checks on the length argument of
-         the copy operation, by having gcc prove that the argument is
-         within bounds.
-
-         If unsure, say N.
-
 source kernel/trace/Kconfig
 
 menu "Runtime Testing"
index cfa68eb..5dc77a8 100644 (file)
@@ -24,7 +24,6 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
         is_single_threaded.o plist.o decompress.o kobject_uevent.o \
         earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o
 
-obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 lib-$(CONFIG_HAS_DMA) += dma-noop.o
index 93f4501..94346b4 100644 (file)
@@ -5485,6 +5485,7 @@ static struct sk_buff *populate_skb(char *buf, int size)
        skb->hash = SKB_HASH;
        skb->queue_mapping = SKB_QUEUE_MAP;
        skb->vlan_tci = SKB_VLAN_TCI;
+       skb->vlan_proto = htons(ETH_P_IP);
        skb->dev = &dev;
        skb->dev->ifindex = SKB_DEV_IFINDEX;
        skb->dev->type = SKB_DEV_TYPE;
index 66c5fc8..cac20c5 100644 (file)
@@ -143,7 +143,7 @@ static int __init
 test_hash_init(void)
 {
        char buf[SIZE+1];
-       u32 string_or = 0, hash_or[2][33] = { 0 };
+       u32 string_or = 0, hash_or[2][33] = { { 0, } };
        unsigned tests = 0;
        unsigned long long h64 = 0;
        int i, j;
@@ -219,21 +219,27 @@ test_hash_init(void)
        }
 
        /* Issue notices about skipped tests. */
-#ifndef HAVE_ARCH__HASH_32
-       pr_info("__hash_32() has no arch implementation to test.");
-#elif HAVE_ARCH__HASH_32 != 1
+#ifdef HAVE_ARCH__HASH_32
+#if HAVE_ARCH__HASH_32 != 1
        pr_info("__hash_32() is arch-specific; not compared to generic.");
 #endif
-#ifndef HAVE_ARCH_HASH_32
-       pr_info("hash_32() has no arch implementation to test.");
-#elif HAVE_ARCH_HASH_32 != 1
+#else
+       pr_info("__hash_32() has no arch implementation to test.");
+#endif
+#ifdef HAVE_ARCH_HASH_32
+#if HAVE_ARCH_HASH_32 != 1
        pr_info("hash_32() is arch-specific; not compared to generic.");
 #endif
-#ifndef HAVE_ARCH_HASH_64
-       pr_info("hash_64() has no arch implementation to test.");
-#elif HAVE_ARCH_HASH_64 != 1
+#else
+       pr_info("hash_32() has no arch implementation to test.");
+#endif
+#ifdef HAVE_ARCH_HASH_64
+#if HAVE_ARCH_HASH_64 != 1
        pr_info("hash_64() is arch-specific; not compared to generic.");
 #endif
+#else
+       pr_info("hash_64() has no arch implementation to test.");
+#endif
 
        pr_notice("%u tests passed.", tests);
 
diff --git a/lib/usercopy.c b/lib/usercopy.c
deleted file mode 100644 (file)
index 4f5b1dd..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#include <linux/export.h>
-#include <linux/bug.h>
-#include <linux/uaccess.h>
-
-void copy_from_user_overflow(void)
-{
-       WARN(1, "Buffer overflow detected!\n");
-}
-EXPORT_SYMBOL(copy_from_user_overflow);
index 2db2112..a6abd76 100644 (file)
@@ -1078,7 +1078,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                goto out;
 
        page = pmd_page(*pmd);
-       VM_BUG_ON_PAGE(!PageHead(page), page);
+       VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
        if (flags & FOLL_TOUCH)
                touch_pmd(vma, addr, pmd);
        if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
@@ -1116,7 +1116,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
        }
 skip_mlock:
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
-       VM_BUG_ON_PAGE(!PageCompound(page), page);
+       VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
        if (flags & FOLL_GET)
                get_page(page);
 
index d8c4e38..2da72a5 100644 (file)
@@ -2336,6 +2336,23 @@ out:
        return ret;
 }
 
+/*
+ * Drop the (possibly final) reference to task->mempolicy.  It needs to be
+ * dropped after task->mempolicy is set to NULL so that any allocation done as
+ * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
+ * policy.
+ */
+void mpol_put_task_policy(struct task_struct *task)
+{
+       struct mempolicy *pol;
+
+       task_lock(task);
+       pol = task->mempolicy;
+       task->mempolicy = NULL;
+       task_unlock(task);
+       mpol_put(pol);
+}
+
 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
 {
        pr_debug("deleting %lx-l%lx\n", n->start, n->end);
index 3fbe73a..a2214c6 100644 (file)
@@ -3137,54 +3137,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        return NULL;
 }
 
-static inline bool
-should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
-                    enum compact_result compact_result,
-                    enum compact_priority *compact_priority,
-                    int compaction_retries)
-{
-       int max_retries = MAX_COMPACT_RETRIES;
-
-       if (!order)
-               return false;
-
-       /*
-        * compaction considers all the zone as desperately out of memory
-        * so it doesn't really make much sense to retry except when the
-        * failure could be caused by insufficient priority
-        */
-       if (compaction_failed(compact_result)) {
-               if (*compact_priority > MIN_COMPACT_PRIORITY) {
-                       (*compact_priority)--;
-                       return true;
-               }
-               return false;
-       }
-
-       /*
-        * make sure the compaction wasn't deferred or didn't bail out early
-        * due to locks contention before we declare that we should give up.
-        * But do not retry if the given zonelist is not suitable for
-        * compaction.
-        */
-       if (compaction_withdrawn(compact_result))
-               return compaction_zonelist_suitable(ac, order, alloc_flags);
-
-       /*
-        * !costly requests are much more important than __GFP_REPEAT
-        * costly ones because they are de facto nofail and invoke OOM
-        * killer to move on while costly can fail and users are ready
-        * to cope with that. 1/4 retries is rather arbitrary but we
-        * would need much more detailed feedback from compaction to
-        * make a better decision.
-        */
-       if (order > PAGE_ALLOC_COSTLY_ORDER)
-               max_retries /= 4;
-       if (compaction_retries <= max_retries)
-               return true;
-
-       return false;
-}
 #else
 static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
@@ -3195,6 +3147,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        return NULL;
 }
 
+#endif /* CONFIG_COMPACTION */
+
 static inline bool
 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
                     enum compact_result compact_result,
@@ -3221,7 +3175,6 @@ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_fla
        }
        return false;
 }
-#endif /* CONFIG_COMPACTION */
 
 /* Perform direct synchronous page reclaim */
 static int
@@ -4407,7 +4360,7 @@ static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
        do {
                zone_type--;
                zone = pgdat->node_zones + zone_type;
-               if (populated_zone(zone)) {
+               if (managed_zone(zone)) {
                        zoneref_set_zone(zone,
                                &zonelist->_zonerefs[nr_zones++]);
                        check_highest_zone(zone_type);
@@ -4645,7 +4598,7 @@ static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
                for (j = 0; j < nr_nodes; j++) {
                        node = node_order[j];
                        z = &NODE_DATA(node)->node_zones[zone_type];
-                       if (populated_zone(z)) {
+                       if (managed_zone(z)) {
                                zoneref_set_zone(z,
                                        &zonelist->_zonerefs[pos++]);
                                check_highest_zone(zone_type);
index a3cc305..089328f 100644 (file)
@@ -134,30 +134,15 @@ static inline const char *check_bogus_address(const void *ptr, unsigned long n)
        return NULL;
 }
 
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
-                                           bool to_user)
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+                                         struct page *page, bool to_user)
 {
-       struct page *page, *endpage;
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
        const void *end = ptr + n - 1;
+       struct page *endpage;
        bool is_reserved, is_cma;
 
-       /*
-        * Some architectures (arm64) return true for virt_addr_valid() on
-        * vmalloced addresses. Work around this by checking for vmalloc
-        * first.
-        */
-       if (is_vmalloc_addr(ptr))
-               return NULL;
-
-       if (!virt_addr_valid(ptr))
-               return NULL;
-
-       page = virt_to_head_page(ptr);
-
-       /* Check slab allocator for flags and size. */
-       if (PageSlab(page))
-               return __check_heap_object(ptr, n, page);
-
        /*
         * Sometimes the kernel data regions are not marked Reserved (see
         * check below). And sometimes [_sdata,_edata) does not cover
@@ -186,7 +171,7 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
                   ((unsigned long)end & (unsigned long)PAGE_MASK)))
                return NULL;
 
-       /* Allow if start and end are inside the same compound page. */
+       /* Allow if fully inside the same compound (__GFP_COMP) page. */
        endpage = virt_to_head_page(end);
        if (likely(endpage == page))
                return NULL;
@@ -199,20 +184,44 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
        is_reserved = PageReserved(page);
        is_cma = is_migrate_cma_page(page);
        if (!is_reserved && !is_cma)
-               goto reject;
+               return "<spans multiple pages>";
 
        for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
                page = virt_to_head_page(ptr);
                if (is_reserved && !PageReserved(page))
-                       goto reject;
+                       return "<spans Reserved and non-Reserved pages>";
                if (is_cma && !is_migrate_cma_page(page))
-                       goto reject;
+                       return "<spans CMA and non-CMA pages>";
        }
+#endif
 
        return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+                                           bool to_user)
+{
+       struct page *page;
+
+       /*
+        * Some architectures (arm64) return true for virt_addr_valid() on
+        * vmalloced addresses. Work around this by checking for vmalloc
+        * first.
+        */
+       if (is_vmalloc_addr(ptr))
+               return NULL;
+
+       if (!virt_addr_valid(ptr))
+               return NULL;
+
+       page = virt_to_head_page(ptr);
+
+       /* Check slab allocator for flags and size. */
+       if (PageSlab(page))
+               return __check_heap_object(ptr, n, page);
 
-reject:
-       return "<spans multiple pages>";
+       /* Verify object does not incorrectly span multiple pages. */
+       return check_page_span(ptr, n, page, to_user);
 }
 
 /*
index 374d95d..b1e12a1 100644 (file)
@@ -1665,7 +1665,7 @@ static bool inactive_reclaimable_pages(struct lruvec *lruvec,
 
        for (zid = sc->reclaim_idx; zid >= 0; zid--) {
                zone = &pgdat->node_zones[zid];
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                if (zone_page_state_snapshot(zone, NR_ZONE_LRU_BASE +
@@ -2036,7 +2036,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
                struct zone *zone = &pgdat->node_zones[zid];
                unsigned long inactive_zone, active_zone;
 
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                inactive_zone = zone_page_state(zone,
@@ -2171,7 +2171,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 
                for (z = 0; z < MAX_NR_ZONES; z++) {
                        struct zone *zone = &pgdat->node_zones[z];
-                       if (!populated_zone(zone))
+                       if (!managed_zone(zone))
                                continue;
 
                        total_high_wmark += high_wmark_pages(zone);
@@ -2510,7 +2510,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
        /* If compaction would go ahead or the allocation would succeed, stop */
        for (z = 0; z <= sc->reclaim_idx; z++) {
                struct zone *zone = &pgdat->node_zones[z];
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
@@ -2840,7 +2840,7 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
 
        for (i = 0; i <= ZONE_NORMAL; i++) {
                zone = &pgdat->node_zones[i];
-               if (!populated_zone(zone) ||
+               if (!managed_zone(zone) ||
                    pgdat_reclaimable_pages(pgdat) == 0)
                        continue;
 
@@ -3141,7 +3141,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
        for (i = 0; i <= classzone_idx; i++) {
                struct zone *zone = pgdat->node_zones + i;
 
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                if (!zone_balanced(zone, order, classzone_idx))
@@ -3169,7 +3169,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
        sc->nr_to_reclaim = 0;
        for (z = 0; z <= sc->reclaim_idx; z++) {
                zone = pgdat->node_zones + z;
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
@@ -3242,7 +3242,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
                if (buffer_heads_over_limit) {
                        for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
                                zone = pgdat->node_zones + i;
-                               if (!populated_zone(zone))
+                               if (!managed_zone(zone))
                                        continue;
 
                                sc.reclaim_idx = i;
@@ -3262,7 +3262,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
                 */
                for (i = classzone_idx; i >= 0; i--) {
                        zone = pgdat->node_zones + i;
-                       if (!populated_zone(zone))
+                       if (!managed_zone(zone))
                                continue;
 
                        if (zone_balanced(zone, sc.order, classzone_idx))
@@ -3508,7 +3508,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
        pg_data_t *pgdat;
        int z;
 
-       if (!populated_zone(zone))
+       if (!managed_zone(zone))
                return;
 
        if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
@@ -3522,7 +3522,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
        /* Only wake kswapd if all zones are unbalanced */
        for (z = 0; z <= classzone_idx; z++) {
                zone = pgdat->node_zones + z;
-               if (!populated_zone(zone))
+               if (!managed_zone(zone))
                        continue;
 
                if (zone_balanced(zone, order, classzone_idx))
index 8a43684..855b72f 100644 (file)
@@ -80,13 +80,10 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
 
        BR_INPUT_SKB_CB(skb)->proxyarp_replied = false;
 
-       if (dev->flags & IFF_NOARP)
+       if ((dev->flags & IFF_NOARP) ||
+           !pskb_may_pull(skb, arp_hdr_len(dev)))
                return;
 
-       if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
-               dev->stats.tx_dropped++;
-               return;
-       }
        parp = arp_hdr(skb);
 
        if (parp->ar_pro != htons(ETH_P_IP) ||
index a5423a1..c5fea93 100644 (file)
@@ -1138,7 +1138,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
                } else {
                        err = br_ip6_multicast_add_group(br, port,
                                                         &grec->grec_mca, vid);
-                       if (!err)
+                       if (err)
                                break;
                }
        }
index 341caa0..d8ad73b 100644 (file)
@@ -134,17 +134,36 @@ void br_stp_disable_port(struct net_bridge_port *p)
                br_become_root_bridge(br);
 }
 
-static void br_stp_start(struct net_bridge *br)
+static int br_stp_call_user(struct net_bridge *br, char *arg)
 {
-       int r;
-       char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
+       char *argv[] = { BR_STP_PROG, br->dev->name, arg, NULL };
        char *envp[] = { NULL };
+       int rc;
+
+       /* call userspace STP and report program errors */
+       rc = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
+       if (rc > 0) {
+               if (rc & 0xff)
+                       br_debug(br, BR_STP_PROG " received signal %d\n",
+                                rc & 0x7f);
+               else
+                       br_debug(br, BR_STP_PROG " exited with code %d\n",
+                                (rc >> 8) & 0xff);
+       }
+
+       return rc;
+}
+
+static void br_stp_start(struct net_bridge *br)
+{
        struct net_bridge_port *p;
+       int err = -ENOENT;
 
        if (net_eq(dev_net(br->dev), &init_net))
-               r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
-       else
-               r = -ENOENT;
+               err = br_stp_call_user(br, "start");
+
+       if (err && err != -ENOENT)
+               br_err(br, "failed to start userspace STP (%d)\n", err);
 
        spin_lock_bh(&br->lock);
 
@@ -153,9 +172,10 @@ static void br_stp_start(struct net_bridge *br)
        else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
                __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
 
-       if (r == 0) {
+       if (!err) {
                br->stp_enabled = BR_USER_STP;
                br_debug(br, "userspace STP started\n");
+
                /* Stop hello and hold timers */
                del_timer(&br->hello_timer);
                list_for_each_entry(p, &br->port_list, list)
@@ -173,14 +193,13 @@ static void br_stp_start(struct net_bridge *br)
 
 static void br_stp_stop(struct net_bridge *br)
 {
-       int r;
-       char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL };
-       char *envp[] = { NULL };
        struct net_bridge_port *p;
+       int err;
 
        if (br->stp_enabled == BR_USER_STP) {
-               r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
-               br_info(br, "userspace STP stopped, return code %d\n", r);
+               err = br_stp_call_user(br, "stop");
+               if (err)
+                       br_err(br, "failed to stop userspace STP (%d)\n", err);
 
                /* To start timers on any ports left in blocking */
                mod_timer(&br->hello_timer, jiffies + br->hello_time);
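
The br_stp_call_user() helper above logs the usermode helper's status before
returning it. A minimal userspace sketch of that decoding, assuming the usual
wait(2)-style encoding call_usermodehelper() returns for UMH_WAIT_PROC (low 7
bits: terminating signal, bits 8..15: exit code); the sample values below are
illustrative only:

#include <stdio.h>

/* Decode a wait(2)-style status the way br_stp_call_user() reports it. */
static void report_helper_status(int rc)
{
	if (rc > 0) {
		if (rc & 0xff)
			printf("helper received signal %d\n", rc & 0x7f);
		else
			printf("helper exited with code %d\n", (rc >> 8) & 0xff);
	} else if (rc < 0) {
		printf("helper could not be run: %d\n", rc);
	}
}

int main(void)
{
	report_helper_status(1 << 8);	/* exit code 1 */
	report_helper_status(9);	/* terminated by signal 9 */
	report_helper_status(-2);	/* e.g. -ENOENT, program missing */
	return 0;
}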
index cceac5b..0833c25 100644 (file)
@@ -368,6 +368,8 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
 
        match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
        if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
+               if (!IS_ERR(match))
+                       module_put(match->me);
                request_module("ebt_%s", m->u.name);
                match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
        }
index 4b901d9..ad47a92 100644 (file)
@@ -86,6 +86,7 @@ static const struct nft_expr_ops nft_meta_bridge_set_ops = {
        .init           = nft_meta_set_init,
        .destroy        = nft_meta_set_destroy,
        .dump           = nft_meta_set_dump,
+       .validate       = nft_meta_set_validate,
 };
 
 static const struct nft_expr_ops *
index b0d307b..9dbece2 100644 (file)
@@ -3963,6 +3963,22 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
        return skb;
 }
 
+/**
+ *     netdev_is_rx_handler_busy - check if receive handler is registered
+ *     @dev: device to check
+ *
+ *     Check if a receive handler is already registered for a given device.
+ *     Return true if there is one.
+ *
+ *     The caller must hold the rtnl_mutex.
+ */
+bool netdev_is_rx_handler_busy(struct net_device *dev)
+{
+       ASSERT_RTNL();
+       return dev && rtnl_dereference(dev->rx_handler);
+}
+EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
+
 /**
  *     netdev_rx_handler_register - register receive handler
  *     @dev: device to register a handler for
index a2879c0..1a7b80f 100644 (file)
@@ -750,11 +750,13 @@ EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
 void __skb_get_hash(struct sk_buff *skb)
 {
        struct flow_keys keys;
+       u32 hash;
 
        __flow_hash_secret_init();
 
-       __skb_set_sw_hash(skb, ___skb_get_hash(skb, &keys, hashrnd),
-                         flow_keys_have_l4(&keys));
+       hash = ___skb_get_hash(skb, &keys, hashrnd);
+
+       __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
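
The two-statement form above matters because C leaves the evaluation order of
function arguments unspecified: in the old call, flow_keys_have_l4(&keys) could
legally run before ___skb_get_hash() had filled in keys. A self-contained
sketch of that hazard and of the fix (the names here are illustrative, not
kernel code):

#include <stdio.h>

static int state;

static int writer(void)
{
	state = 42;
	return state;
}

static int reader(void)
{
	return state;	/* may be evaluated before or after writer() */
}

static void consume(int w, int r)
{
	printf("writer=%d reader=%d\n", w, r);
}

int main(void)
{
	state = 0;
	consume(writer(), reader());	/* reader may legally observe 0 */

	state = 0;
	int w = writer();		/* fixed: sequence the writer first */
	consume(w, reader());		/* reader now always observes 42 */
	return 0;
}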
index ff7736f..96e47c5 100644 (file)
@@ -38,4 +38,7 @@ config NET_DSA_TAG_EDSA
 config NET_DSA_TAG_TRAILER
        bool
 
+config NET_DSA_TAG_QCA
+       bool
+
 endif
index 8af4ded..a3380ed 100644 (file)
@@ -7,3 +7,4 @@ dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o
 dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
 dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
 dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
+dsa_core-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
index d8d267e..66e31ac 100644 (file)
@@ -53,6 +53,9 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
 #endif
 #ifdef CONFIG_NET_DSA_TAG_BRCM
        [DSA_TAG_PROTO_BRCM] = &brcm_netdev_ops,
+#endif
+#ifdef CONFIG_NET_DSA_TAG_QCA
+       [DSA_TAG_PROTO_QCA] = &qca_netdev_ops,
 #endif
        [DSA_TAG_PROTO_NONE] = &none_ops,
 };
index 00077a9..6cfd738 100644 (file)
@@ -81,5 +81,7 @@ extern const struct dsa_device_ops trailer_netdev_ops;
 /* tag_brcm.c */
 extern const struct dsa_device_ops brcm_netdev_ops;
 
+/* tag_qca.c */
+extern const struct dsa_device_ops qca_netdev_ops;
 
 #endif
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
new file mode 100644 (file)
index 0000000..0c90cac
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/etherdevice.h>
+#include "dsa_priv.h"
+
+#define QCA_HDR_LEN    2
+#define QCA_HDR_VERSION        0x2
+
+#define QCA_HDR_RECV_VERSION_MASK      GENMASK(15, 14)
+#define QCA_HDR_RECV_VERSION_S         14
+#define QCA_HDR_RECV_PRIORITY_MASK     GENMASK(13, 11)
+#define QCA_HDR_RECV_PRIORITY_S                11
+#define QCA_HDR_RECV_TYPE_MASK         GENMASK(10, 6)
+#define QCA_HDR_RECV_TYPE_S            6
+#define QCA_HDR_RECV_FRAME_IS_TAGGED   BIT(3)
+#define QCA_HDR_RECV_SOURCE_PORT_MASK  GENMASK(2, 0)
+
+#define QCA_HDR_XMIT_VERSION_MASK      GENMASK(15, 14)
+#define QCA_HDR_XMIT_VERSION_S         14
+#define QCA_HDR_XMIT_PRIORITY_MASK     GENMASK(13, 11)
+#define QCA_HDR_XMIT_PRIORITY_S                11
+#define QCA_HDR_XMIT_CONTROL_MASK      GENMASK(10, 8)
+#define QCA_HDR_XMIT_CONTROL_S         8
+#define QCA_HDR_XMIT_FROM_CPU          BIT(7)
+#define QCA_HDR_XMIT_DP_BIT_MASK       GENMASK(6, 0)
+
+static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       u16 *phdr, hdr;
+
+       dev->stats.tx_packets++;
+       dev->stats.tx_bytes += skb->len;
+
+       if (skb_cow_head(skb, 0) < 0)
+               goto out_free;
+
+       skb_push(skb, QCA_HDR_LEN);
+
+       memmove(skb->data, skb->data + QCA_HDR_LEN, 2 * ETH_ALEN);
+       phdr = (u16 *)(skb->data + 2 * ETH_ALEN);
+
+       /* Set the version field, and set destination port information */
+       hdr = QCA_HDR_VERSION << QCA_HDR_XMIT_VERSION_S |
+               QCA_HDR_XMIT_FROM_CPU |
+               BIT(p->port);
+
+       *phdr = htons(hdr);
+
+       return skb;
+
+out_free:
+       kfree_skb(skb);
+       return NULL;
+}
+
+static int qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
+                      struct packet_type *pt, struct net_device *orig_dev)
+{
+       struct dsa_switch_tree *dst = dev->dsa_ptr;
+       struct dsa_switch *ds;
+       u8 ver;
+       int port;
+       __be16 *phdr, hdr;
+
+       if (unlikely(!dst))
+               goto out_drop;
+
+       skb = skb_unshare(skb, GFP_ATOMIC);
+       if (!skb)
+               goto out;
+
+       if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN)))
+               goto out_drop;
+
+       /* The QCA header is added by the switch between src addr and
+        * Ethertype. At this point, skb->data points to the Ethertype,
+        * so the header is right before it.
+        */
+       phdr = (__be16 *)(skb->data - 2);
+       hdr = ntohs(*phdr);
+
+       /* Make sure the version is correct */
+       ver = (hdr & QCA_HDR_RECV_VERSION_MASK) >> QCA_HDR_RECV_VERSION_S;
+       if (unlikely(ver != QCA_HDR_VERSION))
+               goto out_drop;
+
+       /* Remove QCA tag and recalculate checksum */
+       skb_pull_rcsum(skb, QCA_HDR_LEN);
+       memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - QCA_HDR_LEN,
+               ETH_HLEN - QCA_HDR_LEN);
+
+       /* This protocol doesn't support cascading multiple switches so it's
+        * safe to assume the switch is first in the tree
+        */
+       ds = dst->ds[0];
+       if (!ds)
+               goto out_drop;
+
+       /* Get source port information */
+       port = (hdr & QCA_HDR_RECV_SOURCE_PORT_MASK);
+       if (!ds->ports[port].netdev)
+               goto out_drop;
+
+       /* Update skb & forward the frame accordingly */
+       skb_push(skb, ETH_HLEN);
+       skb->pkt_type = PACKET_HOST;
+       skb->dev = ds->ports[port].netdev;
+       skb->protocol = eth_type_trans(skb, skb->dev);
+
+       skb->dev->stats.rx_packets++;
+       skb->dev->stats.rx_bytes += skb->len;
+
+       netif_receive_skb(skb);
+
+       return 0;
+
+out_drop:
+       kfree_skb(skb);
+out:
+       return 0;
+}
+
+const struct dsa_device_ops qca_netdev_ops = {
+       .xmit   = qca_tag_xmit,
+       .rcv    = qca_tag_rcv,
+};
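
The tag handled above is a 2-byte header that the switch inserts between the
source MAC address and the Ethertype. A standalone sketch of the bit layout,
reusing the driver's mask values; note that the xmit path encodes the
destination port as a bit mask while the receive path carries the source port
as a plain number (the port numbers below are arbitrary examples):

#include <stdio.h>

#define QCA_HDR_VERSION			0x2
#define QCA_HDR_XMIT_VERSION_S		14
#define QCA_HDR_XMIT_FROM_CPU		(1u << 7)
#define QCA_HDR_RECV_VERSION_MASK	0xc000	/* GENMASK(15, 14) */
#define QCA_HDR_RECV_VERSION_S		14
#define QCA_HDR_RECV_SOURCE_PORT_MASK	0x0007	/* GENMASK(2, 0) */

int main(void)
{
	/* Build the xmit header for a CPU-originated frame to port 3. */
	unsigned int xmit = QCA_HDR_VERSION << QCA_HDR_XMIT_VERSION_S |
			    QCA_HDR_XMIT_FROM_CPU |
			    (1u << 3);

	printf("xmit header: 0x%04x\n", xmit);

	/* Decode version and source port from a received header. */
	unsigned int rcv = (QCA_HDR_VERSION << QCA_HDR_RECV_VERSION_S) | 5;

	printf("version %u, source port %u\n",
	       (rcv & QCA_HDR_RECV_VERSION_MASK) >> QCA_HDR_RECV_VERSION_S,
	       rcv & QCA_HDR_RECV_SOURCE_PORT_MASK);
	return 0;
}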
index 415e117..062a67c 100644 (file)
@@ -2232,7 +2232,7 @@ static struct devinet_sysctl_table {
 };
 
 static int __devinet_sysctl_register(struct net *net, char *dev_name,
-                                       struct ipv4_devconf *p)
+                                    int ifindex, struct ipv4_devconf *p)
 {
        int i;
        struct devinet_sysctl_table *t;
@@ -2255,6 +2255,8 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
                goto free;
 
        p->sysctl = t;
+
+       inet_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
        return 0;
 
 free:
@@ -2286,7 +2288,7 @@ static int devinet_sysctl_register(struct in_device *idev)
        if (err)
                return err;
        err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
-                                       &idev->cnf);
+                                       idev->dev->ifindex, &idev->cnf);
        if (err)
                neigh_sysctl_unregister(idev->arp_parms);
        return err;
@@ -2347,11 +2349,12 @@ static __net_init int devinet_init_net(struct net *net)
        }
 
 #ifdef CONFIG_SYSCTL
-       err = __devinet_sysctl_register(net, "all", all);
+       err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
        if (err < 0)
                goto err_reg_all;
 
-       err = __devinet_sysctl_register(net, "default", dflt);
+       err = __devinet_sysctl_register(net, "default",
+                                       NETCONFA_IFINDEX_DEFAULT, dflt);
        if (err < 0)
                goto err_reg_dflt;
 
index 317c319..4e56a4c 100644 (file)
@@ -503,6 +503,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
                if (!dev)
                        return -ENODEV;
                cfg->fc_oif = dev->ifindex;
+               cfg->fc_table = l3mdev_fib_table(dev);
                if (colon) {
                        struct in_ifaddr *ifa;
                        struct in_device *in_dev = __in_dev_get_rtnl(dev);
@@ -1021,7 +1022,7 @@ no_promotions:
                         * First of all, we scan fib_info list searching
                         * for stray nexthop entries, then ignite fib_flush.
                         */
-                       if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local))
+                       if (fib_sync_down_addr(dev, ifa->ifa_local))
                                fib_flush(dev_net(dev));
                }
        }
index 8066ccc..388d3e2 100644 (file)
@@ -1057,6 +1057,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        fi->fib_priority = cfg->fc_priority;
        fi->fib_prefsrc = cfg->fc_prefsrc;
        fi->fib_type = cfg->fc_type;
+       fi->fib_tb_id = cfg->fc_table;
 
        fi->fib_nhs = nhs;
        change_nexthops(fi) {
@@ -1337,18 +1338,21 @@ nla_put_failure:
  *   referring to it.
  * - device went down -> we must shutdown all nexthops going via it.
  */
-int fib_sync_down_addr(struct net *net, __be32 local)
+int fib_sync_down_addr(struct net_device *dev, __be32 local)
 {
        int ret = 0;
        unsigned int hash = fib_laddr_hashfn(local);
        struct hlist_head *head = &fib_info_laddrhash[hash];
+       struct net *net = dev_net(dev);
+       int tb_id = l3mdev_fib_table(dev);
        struct fib_info *fi;
 
        if (!fib_info_laddrhash || local == 0)
                return 0;
 
        hlist_for_each_entry(fi, head, fib_lhash) {
-               if (!net_eq(fi->fib_net, net))
+               if (!net_eq(fi->fib_net, net) ||
+                   fi->fib_tb_id != tb_id)
                        continue;
                if (fi->fib_prefsrc == local) {
                        fi->fib_flags |= RTNH_F_DEAD;
index 95649eb..5719d6b 100644 (file)
@@ -55,6 +55,7 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 #include <net/udp.h>
+#include <net/dst_metadata.h>
 
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
@@ -546,6 +547,81 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
        return 0;
 }
 
+void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
+{
+       struct ip_tunnel *tunnel = netdev_priv(dev);
+       u32 headroom = sizeof(struct iphdr);
+       struct ip_tunnel_info *tun_info;
+       const struct ip_tunnel_key *key;
+       const struct iphdr *inner_iph;
+       struct rtable *rt;
+       struct flowi4 fl4;
+       __be16 df = 0;
+       u8 tos, ttl;
+
+       tun_info = skb_tunnel_info(skb);
+       if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+                    ip_tunnel_info_af(tun_info) != AF_INET))
+               goto tx_error;
+       key = &tun_info->key;
+       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+       inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+       tos = key->tos;
+       if (tos == 1) {
+               if (skb->protocol == htons(ETH_P_IP))
+                       tos = inner_iph->tos;
+               else if (skb->protocol == htons(ETH_P_IPV6))
+                       tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
+       }
+       init_tunnel_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0,
+                        RT_TOS(tos), tunnel->parms.link);
+       if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
+               goto tx_error;
+       rt = ip_route_output_key(tunnel->net, &fl4);
+       if (IS_ERR(rt)) {
+               dev->stats.tx_carrier_errors++;
+               goto tx_error;
+       }
+       if (rt->dst.dev == dev) {
+               ip_rt_put(rt);
+               dev->stats.collisions++;
+               goto tx_error;
+       }
+       tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
+       ttl = key->ttl;
+       if (ttl == 0) {
+               if (skb->protocol == htons(ETH_P_IP))
+                       ttl = inner_iph->ttl;
+               else if (skb->protocol == htons(ETH_P_IPV6))
+                       ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
+               else
+                       ttl = ip4_dst_hoplimit(&rt->dst);
+       }
+       if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
+               df = htons(IP_DF);
+       else if (skb->protocol == htons(ETH_P_IP))
+               df = inner_iph->frag_off & htons(IP_DF);
+       headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
+       if (headroom > dev->needed_headroom)
+               dev->needed_headroom = headroom;
+
+       if (skb_cow_head(skb, dev->needed_headroom)) {
+               ip_rt_put(rt);
+               goto tx_dropped;
+       }
+       iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, key->tos,
+                     key->ttl, df, !net_eq(tunnel->net, dev_net(dev)));
+       return;
+tx_error:
+       dev->stats.tx_errors++;
+       goto kfree;
+tx_dropped:
+       dev->stats.tx_dropped++;
+kfree:
+       kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(ip_md_tunnel_xmit);
+
 void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                    const struct iphdr *tnl_params, u8 protocol)
 {
index 4ae3f8e..c939258 100644 (file)
 #include <net/xfrm.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include <net/dst_metadata.h>
 
 static bool log_ecn_error = true;
 module_param(log_ecn_error, bool, 0644);
@@ -193,6 +194,7 @@ static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
 {
        struct net *net = dev_net(skb->dev);
        struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
+       struct metadata_dst *tun_dst = NULL;
        struct ip_tunnel *tunnel;
        const struct iphdr *iph;
 
@@ -216,7 +218,12 @@ static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
                        tpi = &ipip_tpi;
                if (iptunnel_pull_header(skb, 0, tpi->proto, false))
                        goto drop;
-               return ip_tunnel_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
+               if (tunnel->collect_md) {
+                       tun_dst = ip_tun_rx_dst(skb, 0, 0, 0);
+                       if (!tun_dst)
+                               return 0;
+               }
+               return ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
        }
 
        return -1;
@@ -270,7 +277,10 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
 
        skb_set_inner_ipproto(skb, ipproto);
 
-       ip_tunnel_xmit(skb, dev, tiph, ipproto);
+       if (tunnel->collect_md)
+               ip_md_tunnel_xmit(skb, dev, ipproto);
+       else
+               ip_tunnel_xmit(skb, dev, tiph, ipproto);
        return NETDEV_TX_OK;
 
 tx_error:
@@ -380,13 +390,14 @@ static int ipip_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
 }
 
 static void ipip_netlink_parms(struct nlattr *data[],
-                              struct ip_tunnel_parm *parms)
+                              struct ip_tunnel_parm *parms, bool *collect_md)
 {
        memset(parms, 0, sizeof(*parms));
 
        parms->iph.version = 4;
        parms->iph.protocol = IPPROTO_IPIP;
        parms->iph.ihl = 5;
+       *collect_md = false;
 
        if (!data)
                return;
@@ -414,6 +425,9 @@ static void ipip_netlink_parms(struct nlattr *data[],
 
        if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
                parms->iph.frag_off = htons(IP_DF);
+
+       if (data[IFLA_IPTUN_COLLECT_METADATA])
+               *collect_md = true;
 }
 
 /* This function returns true when ENCAP attributes are present in the nl msg */
@@ -453,18 +467,18 @@ static bool ipip_netlink_encap_parms(struct nlattr *data[],
 static int ipip_newlink(struct net *src_net, struct net_device *dev,
                        struct nlattr *tb[], struct nlattr *data[])
 {
+       struct ip_tunnel *t = netdev_priv(dev);
        struct ip_tunnel_parm p;
        struct ip_tunnel_encap ipencap;
 
        if (ipip_netlink_encap_parms(data, &ipencap)) {
-               struct ip_tunnel *t = netdev_priv(dev);
                int err = ip_tunnel_encap_setup(t, &ipencap);
 
                if (err < 0)
                        return err;
        }
 
-       ipip_netlink_parms(data, &p);
+       ipip_netlink_parms(data, &p, &t->collect_md);
        return ip_tunnel_newlink(dev, tb, &p);
 }
 
@@ -473,6 +487,7 @@ static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
 {
        struct ip_tunnel_parm p;
        struct ip_tunnel_encap ipencap;
+       bool collect_md;
 
        if (ipip_netlink_encap_parms(data, &ipencap)) {
                struct ip_tunnel *t = netdev_priv(dev);
@@ -482,7 +497,9 @@ static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
                        return err;
        }
 
-       ipip_netlink_parms(data, &p);
+       ipip_netlink_parms(data, &p, &collect_md);
+       if (collect_md)
+               return -EINVAL;
 
        if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
            (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
@@ -516,6 +533,8 @@ static size_t ipip_get_size(const struct net_device *dev)
                nla_total_size(2) +
                /* IFLA_IPTUN_ENCAP_DPORT */
                nla_total_size(2) +
+               /* IFLA_IPTUN_COLLECT_METADATA */
+               nla_total_size(0) +
                0;
 }
 
@@ -544,6 +563,9 @@ static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
                        tunnel->encap.flags))
                goto nla_put_failure;
 
+       if (tunnel->collect_md)
+               if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
+                       goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -562,6 +584,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
        [IFLA_IPTUN_ENCAP_FLAGS]        = { .type = NLA_U16 },
        [IFLA_IPTUN_ENCAP_SPORT]        = { .type = NLA_U16 },
        [IFLA_IPTUN_ENCAP_DPORT]        = { .type = NLA_U16 },
+       [IFLA_IPTUN_COLLECT_METADATA]   = { .type = NLA_FLAG },
 };
 
 static struct rtnl_link_ops ipip_link_ops __read_mostly = {
index c24f41c..2c2553b 100644 (file)
@@ -46,6 +46,7 @@ static const struct nft_expr_ops nft_reject_ipv4_ops = {
        .eval           = nft_reject_ipv4_eval,
        .init           = nft_reject_init,
        .dump           = nft_reject_dump,
+       .validate       = nft_reject_validate,
 };
 
 static struct nft_expr_type nft_reject_ipv4_type __read_mostly = {
index a13fcb3..7dae800 100644 (file)
@@ -1020,17 +1020,31 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
 }
 EXPORT_SYMBOL(tcp_sendpage);
 
-static inline int select_size(const struct sock *sk, bool sg)
+/* Do not bother using a page frag for very small frames.
+ * But use this heuristic only for the first skb in the write queue.
+ *
+ * Having no payload in skb->head allows better SACK shifting
+ * in tcp_shift_skb_data(), reducing sack/rack overhead, because
+ * the write queue has fewer skbs.
+ * Each skb can hold up to MAX_SKB_FRAGS * 32Kbytes, or ~0.5 MB.
+ * This also speeds up tso_fragment(), since it won't fall back
+ * to tcp_fragment().
+ */
+static int linear_payload_sz(bool first_skb)
+{
+       if (first_skb)
+               return SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
+       return 0;
+}
+
+static int select_size(const struct sock *sk, bool sg, bool first_skb)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        int tmp = tp->mss_cache;
 
        if (sg) {
                if (sk_can_gso(sk)) {
-                       /* Small frames wont use a full page:
-                        * Payload will immediately follow tcp header.
-                        */
-                       tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
+                       tmp = linear_payload_sz(first_skb);
                } else {
                        int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
 
@@ -1161,6 +1175,8 @@ restart:
                }
 
                if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
+                       bool first_skb;
+
 new_segment:
                        /* Allocate new segment. If the interface is SG,
                         * allocate skb fitting to single page.
@@ -1172,10 +1188,11 @@ new_segment:
                                process_backlog = false;
                                goto restart;
                        }
+                       first_skb = skb_queue_empty(&sk->sk_write_queue);
                        skb = sk_stream_alloc_skb(sk,
-                                                 select_size(sk, sg),
+                                                 select_size(sk, sg, first_skb),
                                                  sk->sk_allocation,
-                                                 skb_queue_empty(&sk->sk_write_queue));
+                                                 first_skb);
                        if (!skb)
                                goto wait_for_memory;
 
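
The rewrite above gives only the first skb in the write queue a linear payload
area; later skbs keep all payload in page frags, which keeps SACK shifting and
tso_fragment() cheap. A userspace sketch of the size calculation, with
placeholder constants standing in for MAX_TCP_HEADER and the skb_shared_info
overhead (these are not the kernel's real values):

#include <stdio.h>
#include <stdbool.h>

#define ASSUMED_MAX_TCP_HEADER	320	/* placeholder, not the kernel value */
#define ASSUMED_SKB_OVERHEAD	320	/* placeholder for skb_shared_info */

static int linear_payload_sz(bool first_skb)
{
	if (first_skb)
		return 2048 - ASSUMED_MAX_TCP_HEADER - ASSUMED_SKB_OVERHEAD;
	return 0;	/* later skbs: payload goes entirely to page frags */
}

int main(void)
{
	printf("first skb linear payload: %d bytes\n", linear_payload_sz(true));
	printf("later skbs linear payload: %d bytes\n", linear_payload_sz(false));
	return 0;
}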
index 54d9f9b..4e777a3 100644 (file)
@@ -150,6 +150,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
        tp->segs_in = 0;
        tcp_segs_in(tp, skb);
        __skb_pull(skb, tcp_hdrlen(skb));
+       sk_forced_mem_schedule(sk, skb->truesize);
        skb_set_owner_r(skb, sk);
 
        TCP_SKB_CB(skb)->seq++;
@@ -226,6 +227,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
        tcp_fastopen_add_skb(child, skb);
 
        tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
+       tp->rcv_wup = tp->rcv_nxt;
        /* tcp_conn_request() is sending the SYNACK,
         * and queues the child into listener accept queue.
         */
index 70b892d..dad3e7e 100644 (file)
@@ -4502,7 +4502,7 @@ coalesce_done:
                                NET_INC_STATS(sock_net(sk),
                                              LINUX_MIB_TCPOFOMERGE);
                                __kfree_skb(skb1);
-                               goto add_sack;
+                               goto merge_right;
                        }
                } else if (tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
                        goto coalesce_done;
@@ -4514,6 +4514,7 @@ insert:
        rb_link_node(&skb->rbnode, parent, p);
        rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
 
+merge_right:
        /* Remove other segments covered by skb. */
        while ((q = rb_next(&skb->rbnode)) != NULL) {
                skb1 = rb_entry(q, struct sk_buff, rbnode);
index 028eb04..9c5fc97 100644 (file)
@@ -76,7 +76,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
 
        else if (!yeah->doing_reno_now) {
index 3155ed7..6a7ff69 100644 (file)
@@ -29,7 +29,7 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
        memset(fl4, 0, sizeof(*fl4));
        fl4->daddr = daddr->a4;
        fl4->flowi4_tos = tos;
-       fl4->flowi4_oif = oif;
+       fl4->flowi4_oif = l3mdev_master_ifindex_by_index(net, oif);
        if (saddr)
                fl4->saddr = saddr->a4;
 
index f418d2e..2f1f5d4 100644 (file)
@@ -778,7 +778,14 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
        }
 
        if (p == &net->ipv6.devconf_all->forwarding) {
+               int old_dflt = net->ipv6.devconf_dflt->forwarding;
+
                net->ipv6.devconf_dflt->forwarding = newf;
+               if ((!newf) ^ (!old_dflt))
+                       inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+                                                    NETCONFA_IFINDEX_DEFAULT,
+                                                    net->ipv6.devconf_dflt);
+
                addrconf_forward_change(net, newf);
                if ((!newf) ^ (!old))
                        inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
@@ -1941,6 +1948,7 @@ errdad:
        spin_unlock_bh(&ifp->lock);
 
        addrconf_mod_dad_work(ifp, 0);
+       in6_ifa_put(ifp);
 }
 
 /* Join to solicited addr multicast group.
@@ -3850,6 +3858,7 @@ static void addrconf_dad_work(struct work_struct *w)
                addrconf_dad_begin(ifp);
                goto out;
        } else if (action == DAD_ABORT) {
+               in6_ifa_hold(ifp);
                addrconf_dad_stop(ifp, 1);
                if (disable_ipv6)
                        addrconf_ifdown(idev->dev, 0);
@@ -6025,7 +6034,7 @@ static const struct ctl_table addrconf_sysctl[] = {
 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
                struct inet6_dev *idev, struct ipv6_devconf *p)
 {
-       int i;
+       int i, ifindex;
        struct ctl_table *table;
        char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
 
@@ -6045,6 +6054,13 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
        if (!p->sysctl_header)
                goto free;
 
+       if (!strcmp(dev_name, "all"))
+               ifindex = NETCONFA_IFINDEX_ALL;
+       else if (!strcmp(dev_name, "default"))
+               ifindex = NETCONFA_IFINDEX_DEFAULT;
+       else
+               ifindex = idev->dev->ifindex;
+       inet6_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
        return 0;
 
 free:
index 2050217..6a66adb 100644 (file)
@@ -57,6 +57,7 @@
 #include <net/inet_ecn.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include <net/dst_metadata.h>
 
 MODULE_AUTHOR("Ville Nuorvala");
 MODULE_DESCRIPTION("IPv6 tunneling device");
@@ -90,6 +91,7 @@ struct ip6_tnl_net {
        struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
        struct ip6_tnl __rcu *tnls_wc[1];
        struct ip6_tnl __rcu **tnls[2];
+       struct ip6_tnl __rcu *collect_md_tun;
 };
 
 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
@@ -166,6 +168,10 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
                        return t;
        }
 
+       t = rcu_dereference(ip6n->collect_md_tun);
+       if (t)
+               return t;
+
        t = rcu_dereference(ip6n->tnls_wc[0]);
        if (t && (t->dev->flags & IFF_UP))
                return t;
@@ -209,6 +215,8 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
 {
        struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
 
+       if (t->parms.collect_md)
+               rcu_assign_pointer(ip6n->collect_md_tun, t);
        rcu_assign_pointer(t->next , rtnl_dereference(*tp));
        rcu_assign_pointer(*tp, t);
 }
@@ -224,6 +232,9 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
        struct ip6_tnl __rcu **tp;
        struct ip6_tnl *iter;
 
+       if (t->parms.collect_md)
+               rcu_assign_pointer(ip6n->collect_md_tun, NULL);
+
        for (tp = ip6_tnl_bucket(ip6n, &t->parms);
             (iter = rtnl_dereference(*tp)) != NULL;
             tp = &iter->next) {
@@ -829,6 +840,9 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
 
        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
 
+       if (tun_dst)
+               skb_dst_set(skb, (struct dst_entry *)tun_dst);
+
        gro_cells_receive(&tunnel->gro_cells, skb);
        return 0;
 
@@ -865,6 +879,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
 {
        struct ip6_tnl *t;
        const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       struct metadata_dst *tun_dst = NULL;
        int ret = -1;
 
        rcu_read_lock();
@@ -881,7 +896,12 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
                        goto drop;
                if (iptunnel_pull_header(skb, 0, tpi->proto, false))
                        goto drop;
-               ret = __ip6_tnl_rcv(t, skb, tpi, NULL, dscp_ecn_decapsulate,
+               if (t->parms.collect_md) {
+                       tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
+                       if (!tun_dst)
+                               return 0;
+               }
+               ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
                                    log_ecn_error);
        }
 
@@ -1012,8 +1032,16 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
        int mtu;
        unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
        unsigned int max_headroom = psh_hlen;
+       u8 hop_limit;
        int err = -1;
 
+       if (t->parms.collect_md) {
+               hop_limit = skb_tunnel_info(skb)->key.ttl;
+               goto route_lookup;
+       } else {
+               hop_limit = t->parms.hop_limit;
+       }
+
        /* NBMA tunnel */
        if (ipv6_addr_any(&t->parms.raddr)) {
                struct in6_addr *addr6;
@@ -1043,6 +1071,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
                goto tx_err_link_failure;
 
        if (!dst) {
+route_lookup:
                dst = ip6_route_output(net, NULL, fl6);
 
                if (dst->error)
@@ -1053,6 +1082,10 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
                        dst = NULL;
                        goto tx_err_link_failure;
                }
+               if (t->parms.collect_md &&
+                   ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
+                                      &fl6->daddr, 0, &fl6->saddr))
+                       goto tx_err_link_failure;
                ndst = dst;
        }
 
@@ -1071,7 +1104,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
        }
        if (mtu < IPV6_MIN_MTU)
                mtu = IPV6_MIN_MTU;
-       if (skb_dst(skb))
+       if (skb_dst(skb) && !t->parms.collect_md)
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
        if (skb->len > mtu && !skb_is_gso(skb)) {
                *pmtu = mtu;
@@ -1111,8 +1144,13 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
                skb = new_skb;
        }
 
-       if (!fl6->flowi6_mark && ndst)
-               dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
+       if (t->parms.collect_md) {
+               if (t->encap.type != TUNNEL_ENCAP_NONE)
+                       goto tx_err_dst_release;
+       } else {
+               if (!fl6->flowi6_mark && ndst)
+                       dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
+       }
        skb_dst_set(skb, dst);
 
        if (encap_limit >= 0) {
@@ -1137,7 +1175,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
        ipv6h = ipv6_hdr(skb);
        ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
                     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
-       ipv6h->hop_limit = t->parms.hop_limit;
+       ipv6h->hop_limit = hop_limit;
        ipv6h->nexthdr = proto;
        ipv6h->saddr = fl6->saddr;
        ipv6h->daddr = fl6->daddr;
@@ -1170,18 +1208,34 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        if (tproto != IPPROTO_IPIP && tproto != 0)
                return -1;
 
-       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-               encap_limit = t->parms.encap_limit;
+       dsfield = ipv4_get_dsfield(iph);
 
-       memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+       if (t->parms.collect_md) {
+               struct ip_tunnel_info *tun_info;
+               const struct ip_tunnel_key *key;
 
-       dsfield = ipv4_get_dsfield(iph);
+               tun_info = skb_tunnel_info(skb);
+               if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+                            ip_tunnel_info_af(tun_info) != AF_INET6))
+                       return -1;
+               key = &tun_info->key;
+               memset(&fl6, 0, sizeof(fl6));
+               fl6.flowi6_proto = IPPROTO_IPIP;
+               fl6.daddr = key->u.ipv6.dst;
+               fl6.flowlabel = key->label;
+       } else {
+               if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+                       encap_limit = t->parms.encap_limit;
 
-       if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-               fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
-                                         & IPV6_TCLASS_MASK;
-       if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
-               fl6.flowi6_mark = skb->mark;
+               memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+               fl6.flowi6_proto = IPPROTO_IPIP;
+
+               if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+                       fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
+                                        & IPV6_TCLASS_MASK;
+               if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+                       fl6.flowi6_mark = skb->mark;
+       }
 
        if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
                return -1;
@@ -1219,28 +1273,47 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
            ip6_tnl_addr_conflict(t, ipv6h))
                return -1;
 
-       offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
-       if (offset > 0) {
-               struct ipv6_tlv_tnl_enc_lim *tel;
-               tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
-               if (tel->encap_limit == 0) {
-                       icmpv6_send(skb, ICMPV6_PARAMPROB,
-                                   ICMPV6_HDR_FIELD, offset + 2);
+       dsfield = ipv6_get_dsfield(ipv6h);
+
+       if (t->parms.collect_md) {
+               struct ip_tunnel_info *tun_info;
+               const struct ip_tunnel_key *key;
+
+               tun_info = skb_tunnel_info(skb);
+               if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+                            ip_tunnel_info_af(tun_info) != AF_INET6))
                        return -1;
+               key = &tun_info->key;
+               memset(&fl6, 0, sizeof(fl6));
+               fl6.flowi6_proto = IPPROTO_IPV6;
+               fl6.daddr = key->u.ipv6.dst;
+               fl6.flowlabel = key->label;
+       } else {
+               offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+               if (offset > 0) {
+                       struct ipv6_tlv_tnl_enc_lim *tel;
+
+                       tel = (void *)&skb_network_header(skb)[offset];
+                       if (tel->encap_limit == 0) {
+                               icmpv6_send(skb, ICMPV6_PARAMPROB,
+                                           ICMPV6_HDR_FIELD, offset + 2);
+                               return -1;
+                       }
+                       encap_limit = tel->encap_limit - 1;
+               } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
+                       encap_limit = t->parms.encap_limit;
                }
-               encap_limit = tel->encap_limit - 1;
-       } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-               encap_limit = t->parms.encap_limit;
 
-       memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+               memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+               fl6.flowi6_proto = IPPROTO_IPV6;
 
-       dsfield = ipv6_get_dsfield(ipv6h);
-       if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-               fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
-       if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
-               fl6.flowlabel |= ip6_flowlabel(ipv6h);
-       if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
-               fl6.flowi6_mark = skb->mark;
+               if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+                       fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK);
+               if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
+                       fl6.flowlabel |= ip6_flowlabel(ipv6h);
+               if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+                       fl6.flowi6_mark = skb->mark;
+       }
 
        if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
                return -1;
@@ -1739,6 +1812,10 @@ static int ip6_tnl_dev_init(struct net_device *dev)
        if (err)
                return err;
        ip6_tnl_link_config(t);
+       if (t->parms.collect_md) {
+               dev->features |= NETIF_F_NETNS_LOCAL;
+               netif_keep_dst(dev);
+       }
        return 0;
 }
 
@@ -1809,6 +1886,9 @@ static void ip6_tnl_netlink_parms(struct nlattr *data[],
 
        if (data[IFLA_IPTUN_PROTO])
                parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
+
+       if (data[IFLA_IPTUN_COLLECT_METADATA])
+               parms->collect_md = true;
 }
 
 static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
@@ -1848,6 +1928,7 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
                           struct nlattr *tb[], struct nlattr *data[])
 {
        struct net *net = dev_net(dev);
+       struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
        struct ip6_tnl *nt, *t;
        struct ip_tunnel_encap ipencap;
 
@@ -1862,9 +1943,14 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
 
        ip6_tnl_netlink_parms(data, &nt->parms);
 
-       t = ip6_tnl_locate(net, &nt->parms, 0);
-       if (!IS_ERR(t))
-               return -EEXIST;
+       if (nt->parms.collect_md) {
+               if (rtnl_dereference(ip6n->collect_md_tun))
+                       return -EEXIST;
+       } else {
+               t = ip6_tnl_locate(net, &nt->parms, 0);
+               if (!IS_ERR(t))
+                       return -EEXIST;
+       }
 
        return ip6_tnl_create2(dev);
 }
@@ -1888,6 +1974,8 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
                        return err;
        }
        ip6_tnl_netlink_parms(data, &p);
+       if (p.collect_md)
+               return -EINVAL;
 
        t = ip6_tnl_locate(net, &p, 0);
        if (!IS_ERR(t)) {
@@ -1935,6 +2023,8 @@ static size_t ip6_tnl_get_size(const struct net_device *dev)
                nla_total_size(2) +
                /* IFLA_IPTUN_ENCAP_DPORT */
                nla_total_size(2) +
+               /* IFLA_IPTUN_COLLECT_METADATA */
+               nla_total_size(0) +
                0;
 }
 
@@ -1953,16 +2043,15 @@ static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
            nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto))
                goto nla_put_failure;
 
-       if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
-                       tunnel->encap.type) ||
-       nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
-                    tunnel->encap.sport) ||
-       nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
-                    tunnel->encap.dport) ||
-       nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
-                   tunnel->encap.flags))
+       if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
+           nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
+           nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
+           nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
                goto nla_put_failure;
 
+       if (parm->collect_md)
+               if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
+                       goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -1990,6 +2079,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
        [IFLA_IPTUN_ENCAP_FLAGS]        = { .type = NLA_U16 },
        [IFLA_IPTUN_ENCAP_SPORT]        = { .type = NLA_U16 },
        [IFLA_IPTUN_ENCAP_DPORT]        = { .type = NLA_U16 },
+       [IFLA_IPTUN_COLLECT_METADATA]   = { .type = NLA_FLAG },
 };
 
 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
index 533cd57..92bda99 100644 (file)
@@ -47,6 +47,7 @@ static const struct nft_expr_ops nft_reject_ipv6_ops = {
        .eval           = nft_reject_ipv6_eval,
        .init           = nft_reject_init,
        .dump           = nft_reject_dump,
+       .validate       = nft_reject_validate,
 };
 
 static struct nft_expr_type nft_reject_ipv6_type __read_mostly = {
index 0900352..0e983b6 100644 (file)
@@ -126,8 +126,10 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        rt = (struct rt6_info *) dst;
 
        np = inet6_sk(sk);
-       if (!np)
-               return -EBADF;
+       if (!np) {
+               err = -EBADF;
+               goto dst_err_out;
+       }
 
        if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
                fl6.flowi6_oif = np->mcast_oif;
@@ -163,6 +165,9 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        }
        release_sock(sk);
 
+dst_err_out:
+       dst_release(dst);
+
        if (err)
                return err;
 
index 0eaab1f..00a2d40 100644 (file)
@@ -23,6 +23,7 @@ int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb)
 
 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
 {
+       XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
        return xfrm_input(skb, nexthdr, spi, 0);
index b7b7e86..e0f71c0 100644 (file)
@@ -36,7 +36,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
        int err;
 
        memset(&fl6, 0, sizeof(fl6));
-       fl6.flowi6_oif = oif;
+       fl6.flowi6_oif = l3mdev_master_ifindex_by_index(net, oif);
        fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
        memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
        if (saddr)
index 2632ac7..b7f869a 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/socket.h>
 #include <linux/uaccess.h>
 #include <linux/workqueue.h>
+#include <linux/syscalls.h>
 #include <net/kcm.h>
 #include <net/netns/generic.h>
 #include <net/sock.h>
@@ -1721,7 +1722,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        if (copy_to_user((void __user *)arg, &info,
                                         sizeof(info))) {
                                err = -EFAULT;
-                               sock_release(newsock);
+                               sys_close(info.fd);
                        }
                }
 
index 1e40dac..a2ed3bd 100644 (file)
@@ -1855,6 +1855,9 @@ static __net_exit void l2tp_exit_net(struct net *net)
                (void)l2tp_tunnel_delete(tunnel);
        }
        rcu_read_unlock_bh();
+
+       flush_workqueue(l2tp_wq);
+       rcu_barrier();
 }
 
 static struct pernet_operations l2tp_net_ops = {
index ef2cd30..965f7e3 100644 (file)
@@ -121,7 +121,7 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
 }
 
 
-static struct net_device_ops l2tp_eth_netdev_ops = {
+static const struct net_device_ops l2tp_eth_netdev_ops = {
        .ndo_init               = l2tp_eth_dev_init,
        .ndo_uninit             = l2tp_eth_dev_uninit,
        .ndo_start_xmit         = l2tp_eth_dev_xmit,
index 8ae3ed9..db916cf 100644 (file)
@@ -38,7 +38,7 @@ static u16 llc_ui_sap_link_no_max[256];
 static struct sockaddr_llc llc_ui_addrnull;
 static const struct proto_ops llc_ui_ops;
 
-static long llc_ui_wait_for_conn(struct sock *sk, long timeout);
+static bool llc_ui_wait_for_conn(struct sock *sk, long timeout);
 static int llc_ui_wait_for_disc(struct sock *sk, long timeout);
 static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout);
 
@@ -551,7 +551,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
        return rc;
 }
 
-static long llc_ui_wait_for_conn(struct sock *sk, long timeout)
+static bool llc_ui_wait_for_conn(struct sock *sk, long timeout)
 {
        DEFINE_WAIT(wait);
 
index b5d28f1..afca7d1 100644 (file)
@@ -333,10 +333,11 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
        if (!uc.center_freq1)
                return;
 
-       /* proceed to downgrade the chandef until usable or the same */
+       /* proceed to downgrade the chandef until usable or the same as AP BW */
        while (uc.width > max_width ||
-              !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
-                                             sdata->wdev.iftype))
+              (uc.width > sta->tdls_chandef.width &&
+               !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
+                                              sdata->wdev.iftype)))
                ieee80211_chandef_downgrade(&uc);
 
        if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
index 5eefe4a..75d696f 100644 (file)
@@ -30,7 +30,6 @@ nft_netdev_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
        if (!iph)
                return;
 
-       iph = ip_hdr(skb);
        if (iph->ihl < 5 || iph->version != 4)
                return;
 
index 70eb2f6..d44d89b 100644 (file)
@@ -343,12 +343,12 @@ static int nfnl_acct_del(struct net *net, struct sock *nfnl,
                         struct sk_buff *skb, const struct nlmsghdr *nlh,
                         const struct nlattr * const tb[])
 {
-       char *acct_name;
-       struct nf_acct *cur;
+       struct nf_acct *cur, *tmp;
        int ret = -ENOENT;
+       char *acct_name;
 
        if (!tb[NFACCT_NAME]) {
-               list_for_each_entry(cur, &net->nfnl_acct_list, head)
+               list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head)
                        nfnl_acct_try_del(cur);
 
                return 0;
index 68216cd..139e086 100644 (file)
@@ -98,31 +98,28 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
                break;
        }
 
-       l4proto = nf_ct_l4proto_find_get(l3num, l4num);
-
-       /* This protocol is not supportted, skip. */
-       if (l4proto->l4proto != l4num) {
-               ret = -EOPNOTSUPP;
-               goto err_proto_put;
-       }
-
        if (matching) {
                if (nlh->nlmsg_flags & NLM_F_REPLACE) {
                        /* You cannot replace one timeout policy by another of
                         * different kind, sorry.
                         */
                        if (matching->l3num != l3num ||
-                           matching->l4proto->l4proto != l4num) {
-                               ret = -EINVAL;
-                               goto err_proto_put;
-                       }
-
-                       ret = ctnl_timeout_parse_policy(&matching->data,
-                                                       l4proto, net,
-                                                       cda[CTA_TIMEOUT_DATA]);
-                       return ret;
+                           matching->l4proto->l4proto != l4num)
+                               return -EINVAL;
+
+                       return ctnl_timeout_parse_policy(&matching->data,
+                                                        matching->l4proto, net,
+                                                        cda[CTA_TIMEOUT_DATA]);
                }
-               ret = -EBUSY;
+
+               return -EBUSY;
+       }
+
+       l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+
+       /* This protocol is not supported, skip. */
+       if (l4proto->l4proto != l4num) {
+               ret = -EOPNOTSUPP;
                goto err_proto_put;
        }
 
@@ -305,7 +302,16 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout)
        const struct hlist_nulls_node *nn;
        unsigned int last_hsize;
        spinlock_t *lock;
-       int i;
+       int i, cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+               spin_lock_bh(&pcpu->lock);
+               hlist_nulls_for_each_entry(h, nn, &pcpu->unconfirmed, hnnode)
+                       untimeout(h, timeout);
+               spin_unlock_bh(&pcpu->lock);
+       }
 
        local_bh_disable();
 restart:
@@ -350,12 +356,13 @@ static int cttimeout_del_timeout(struct net *net, struct sock *ctnl,
                                 const struct nlmsghdr *nlh,
                                 const struct nlattr * const cda[])
 {
-       struct ctnl_timeout *cur;
+       struct ctnl_timeout *cur, *tmp;
        int ret = -ENOENT;
        char *name;
 
        if (!cda[CTA_TIMEOUT_NAME]) {
-               list_for_each_entry(cur, &net->nfct_timeout_list, head)
+               list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list,
+                                        head)
                        ctnl_timeout_try_del(net, cur);
 
                return 0;
index 2863f34..8a6bc76 100644 (file)
@@ -291,10 +291,16 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
 }
 EXPORT_SYMBOL_GPL(nft_meta_get_init);
 
-static int nft_meta_set_init_pkttype(const struct nft_ctx *ctx)
+int nft_meta_set_validate(const struct nft_ctx *ctx,
+                         const struct nft_expr *expr,
+                         const struct nft_data **data)
 {
+       struct nft_meta *priv = nft_expr_priv(expr);
        unsigned int hooks;
 
+       if (priv->key != NFT_META_PKTTYPE)
+               return 0;
+
        switch (ctx->afi->family) {
        case NFPROTO_BRIDGE:
                hooks = 1 << NF_BR_PRE_ROUTING;
@@ -308,6 +314,7 @@ static int nft_meta_set_init_pkttype(const struct nft_ctx *ctx)
 
        return nft_chain_validate_hooks(ctx->chain, hooks);
 }
+EXPORT_SYMBOL_GPL(nft_meta_set_validate);
 
 int nft_meta_set_init(const struct nft_ctx *ctx,
                      const struct nft_expr *expr,
@@ -327,15 +334,16 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
                len = sizeof(u8);
                break;
        case NFT_META_PKTTYPE:
-               err = nft_meta_set_init_pkttype(ctx);
-               if (err)
-                       return err;
                len = sizeof(u8);
                break;
        default:
                return -EOPNOTSUPP;
        }
 
+       err = nft_meta_set_validate(ctx, expr, NULL);
+       if (err < 0)
+               return err;
+
        priv->sreg = nft_parse_register(tb[NFTA_META_SREG]);
        err = nft_validate_register_load(priv->sreg, len);
        if (err < 0)
@@ -407,6 +415,7 @@ static const struct nft_expr_ops nft_meta_set_ops = {
        .init           = nft_meta_set_init,
        .destroy        = nft_meta_set_destroy,
        .dump           = nft_meta_set_dump,
+       .validate       = nft_meta_set_validate,
 };
 
 static const struct nft_expr_ops *
index 0522fc9..c64de3f 100644 (file)
@@ -26,11 +26,27 @@ const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
 };
 EXPORT_SYMBOL_GPL(nft_reject_policy);
 
+int nft_reject_validate(const struct nft_ctx *ctx,
+                       const struct nft_expr *expr,
+                       const struct nft_data **data)
+{
+       return nft_chain_validate_hooks(ctx->chain,
+                                       (1 << NF_INET_LOCAL_IN) |
+                                       (1 << NF_INET_FORWARD) |
+                                       (1 << NF_INET_LOCAL_OUT));
+}
+EXPORT_SYMBOL_GPL(nft_reject_validate);
+
 int nft_reject_init(const struct nft_ctx *ctx,
                    const struct nft_expr *expr,
                    const struct nlattr * const tb[])
 {
        struct nft_reject *priv = nft_expr_priv(expr);
+       int err;
+
+       err = nft_reject_validate(ctx, expr, NULL);
+       if (err < 0)
+               return err;
 
        if (tb[NFTA_REJECT_TYPE] == NULL)
                return -EINVAL;
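
nft_reject_validate() above confines reject expressions to the input, forward
and output hooks by testing the chain's hook number against a bitmask. A
standalone sketch of that check using the standard NF_INET_* hook numbering;
the chain examples are illustrative:

#include <stdio.h>
#include <stdbool.h>

enum nf_inet_hooks {
	NF_INET_PRE_ROUTING,
	NF_INET_LOCAL_IN,
	NF_INET_FORWARD,
	NF_INET_LOCAL_OUT,
	NF_INET_POST_ROUTING,
};

/* Mirror of the hook-mask test: is this chain's hook one of the allowed ones? */
static bool hook_allowed(unsigned int hook, unsigned int allowed)
{
	return allowed & (1u << hook);
}

int main(void)
{
	unsigned int reject_hooks = (1 << NF_INET_LOCAL_IN) |
				    (1 << NF_INET_FORWARD) |
				    (1 << NF_INET_LOCAL_OUT);

	printf("input chain allowed: %d\n",
	       hook_allowed(NF_INET_LOCAL_IN, reject_hooks));
	printf("prerouting chain allowed: %d\n",
	       hook_allowed(NF_INET_PRE_ROUTING, reject_hooks));
	return 0;
}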
index 759ca52..e79d9ca 100644 (file)
@@ -66,7 +66,11 @@ static int nft_reject_inet_init(const struct nft_ctx *ctx,
                                const struct nlattr * const tb[])
 {
        struct nft_reject *priv = nft_expr_priv(expr);
-       int icmp_code;
+       int icmp_code, err;
+
+       err = nft_reject_validate(ctx, expr, NULL);
+       if (err < 0)
+               return err;
 
        if (tb[NFTA_REJECT_TYPE] == NULL)
                return -EINVAL;
@@ -124,6 +128,7 @@ static const struct nft_expr_ops nft_reject_inet_ops = {
        .eval           = nft_reject_inet_eval,
        .init           = nft_reject_inet_init,
        .dump           = nft_reject_inet_dump,
+       .validate       = nft_reject_validate,
 };
 
 static struct nft_expr_type nft_reject_inet_type __read_mostly = {
index 4fe9032..863e992 100644 (file)
@@ -71,6 +71,8 @@ struct ovs_frag_data {
 static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
 
 #define DEFERRED_ACTION_FIFO_SIZE 10
+#define OVS_RECURSION_LIMIT 5
+#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
 struct action_fifo {
        int head;
        int tail;
@@ -78,7 +80,12 @@ struct action_fifo {
        struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
 };
 
+struct recirc_keys {
+       struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
+};
+
 static struct action_fifo __percpu *action_fifos;
+static struct recirc_keys __percpu *recirc_keys;
 static DEFINE_PER_CPU(int, exec_actions_level);
 
 static void action_fifo_init(struct action_fifo *fifo)
@@ -1020,6 +1027,7 @@ static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
                          const struct nlattr *a, int rem)
 {
        struct deferred_action *da;
+       int level;
 
        if (!is_flow_key_valid(key)) {
                int err;
@@ -1043,6 +1051,18 @@ static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
                        return 0;
        }
 
+       level = this_cpu_read(exec_actions_level);
+       if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
+               struct recirc_keys *rks = this_cpu_ptr(recirc_keys);
+               struct sw_flow_key *recirc_key = &rks->key[level - 1];
+
+               *recirc_key = *key;
+               recirc_key->recirc_id = nla_get_u32(a);
+               ovs_dp_process_packet(skb, recirc_key);
+
+               return 0;
+       }
+
        da = add_deferred_actions(skb, key, NULL);
        if (da) {
                da->pkt_key.recirc_id = nla_get_u32(a);
@@ -1209,11 +1229,10 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        const struct sw_flow_actions *acts,
                        struct sw_flow_key *key)
 {
-       static const int ovs_recursion_limit = 5;
        int err, level;
 
        level = __this_cpu_inc_return(exec_actions_level);
-       if (unlikely(level > ovs_recursion_limit)) {
+       if (unlikely(level > OVS_RECURSION_LIMIT)) {
                net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
                                     ovs_dp_name(dp));
                kfree_skb(skb);
@@ -1238,10 +1257,17 @@ int action_fifos_init(void)
        if (!action_fifos)
                return -ENOMEM;
 
+       recirc_keys = alloc_percpu(struct recirc_keys);
+       if (!recirc_keys) {
+               free_percpu(action_fifos);
+               return -ENOMEM;
+       }
+
        return 0;
 }
 
 void action_fifos_exit(void)
 {
        free_percpu(action_fifos);
+       free_percpu(recirc_keys);
 }
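In the hunks above, exec_actions_level is incremented before the actions run, so the first invocation sees level 1 and anything beyond OVS_RECURSION_LIMIT (5) is dropped. Levels 1 to 3 recirculate directly using a per-CPU scratch key (key[level - 1]); the two deepest levels fall back to the deferred-action FIFO, which is why the scratch array only needs OVS_DEFERRED_ACTION_THRESHOLD = OVS_RECURSION_LIMIT - 2 entries. A small, self-contained check of that indexing (userspace, illustrative only):

	#include <assert.h>
	#include <stdio.h>

	#define OVS_RECURSION_LIMIT 5
	#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)

	int main(void)
	{
		/* Levels 1..3 index recirc_keys->key[0..2]; levels 4..5 go to
		 * the deferred-action FIFO; anything deeper is dropped.
		 */
		for (int level = 1; level <= OVS_RECURSION_LIMIT; level++) {
			if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
				int idx = level - 1;

				assert(idx >= 0 &&
				       idx < OVS_DEFERRED_ACTION_THRESHOLD);
				printf("level %d -> key[%d]\n", level, idx);
			} else {
				printf("level %d -> deferred FIFO\n", level);
			}
		}
		return 0;
	}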
index 1240ae3..0fa4543 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/in.h>
 #include <linux/rcupdate.h>
+#include <linux/cpumask.h>
 #include <linux/if_arp.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
@@ -72,32 +73,33 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 {
        struct flow_stats *stats;
        int node = numa_node_id();
+       int cpu = smp_processor_id();
        int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 
-       stats = rcu_dereference(flow->stats[node]);
+       stats = rcu_dereference(flow->stats[cpu]);
 
-       /* Check if already have node-specific stats. */
+       /* Check if we already have CPU-specific stats. */
        if (likely(stats)) {
                spin_lock(&stats->lock);
                /* Mark if we write on the pre-allocated stats. */
-               if (node == 0 && unlikely(flow->stats_last_writer != node))
-                       flow->stats_last_writer = node;
+               if (cpu == 0 && unlikely(flow->stats_last_writer != cpu))
+                       flow->stats_last_writer = cpu;
        } else {
                stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
                spin_lock(&stats->lock);
 
-               /* If the current NUMA-node is the only writer on the
+               /* If the current CPU is the only writer on the
                 * pre-allocated stats keep using them.
                 */
-               if (unlikely(flow->stats_last_writer != node)) {
+               if (unlikely(flow->stats_last_writer != cpu)) {
                        /* A previous locker may have already allocated the
-                        * stats, so we need to check again.  If node-specific
+                        * stats, so we need to check again.  If CPU-specific
                         * stats were already allocated, we update the pre-
                         * allocated stats as we have already locked them.
                         */
-                       if (likely(flow->stats_last_writer != NUMA_NO_NODE)
-                           && likely(!rcu_access_pointer(flow->stats[node]))) {
-                               /* Try to allocate node-specific stats. */
+                       if (likely(flow->stats_last_writer != -1) &&
+                           likely(!rcu_access_pointer(flow->stats[cpu]))) {
+                               /* Try to allocate CPU-specific stats. */
                                struct flow_stats *new_stats;
 
                                new_stats =
@@ -114,12 +116,12 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
                                        new_stats->tcp_flags = tcp_flags;
                                        spin_lock_init(&new_stats->lock);
 
-                                       rcu_assign_pointer(flow->stats[node],
+                                       rcu_assign_pointer(flow->stats[cpu],
                                                           new_stats);
                                        goto unlock;
                                }
                        }
-                       flow->stats_last_writer = node;
+                       flow->stats_last_writer = cpu;
                }
        }
 
@@ -136,14 +138,15 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
                        struct ovs_flow_stats *ovs_stats,
                        unsigned long *used, __be16 *tcp_flags)
 {
-       int node;
+       int cpu;
 
        *used = 0;
        *tcp_flags = 0;
        memset(ovs_stats, 0, sizeof(*ovs_stats));
 
-       for_each_node(node) {
-               struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[node]);
+       /* We open code this to make sure cpu 0 is always considered */
+       for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask)) {
+               struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
 
                if (stats) {
                        /* Local CPU may write on non-local stats, so we must
@@ -163,10 +166,11 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
 /* Called with ovs_mutex. */
 void ovs_flow_stats_clear(struct sw_flow *flow)
 {
-       int node;
+       int cpu;
 
-       for_each_node(node) {
-               struct flow_stats *stats = ovsl_dereference(flow->stats[node]);
+       /* We open code this to make sure cpu 0 is always considered */
+       for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask)) {
+               struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
 
                if (stats) {
                        spin_lock_bh(&stats->lock);
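The loop above is open coded because stats[0] is the slot pre-allocated in ovs_flow_alloc() and must always be visited, regardless of how the possible-CPU mask is laid out; after index 0 the walk simply follows cpu_possible_mask. A sketch of the same walk factored into a helper (illustrative only; visit() is a placeholder, the other names are taken from the hunks above):

	static void flow_stats_walk(struct sw_flow *flow,
				    void (*visit)(struct flow_stats *stats))
	{
		int cpu;

		/* Start at index 0 unconditionally (the pre-allocated slot),
		 * then continue over the possible CPUs.
		 */
		for (cpu = 0; cpu < nr_cpu_ids;
		     cpu = cpumask_next(cpu, cpu_possible_mask)) {
			struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]);

			if (stats)
				visit(stats);
		}
	}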
index 156a302..ae783f5 100644 (file)
@@ -178,14 +178,14 @@ struct sw_flow {
                struct hlist_node node[2];
                u32 hash;
        } flow_table, ufid_table;
-       int stats_last_writer;          /* NUMA-node id of the last writer on
+       int stats_last_writer;          /* CPU id of the last writer on
                                         * 'stats[0]'.
                                         */
        struct sw_flow_key key;
        struct sw_flow_id id;
        struct sw_flow_mask *mask;
        struct sw_flow_actions __rcu *sf_acts;
-       struct flow_stats __rcu *stats[]; /* One for each NUMA node.  First one
+       struct flow_stats __rcu *stats[]; /* One for each CPU.  First one
                                           * is allocated at flow creation time,
                                           * the rest are allocated on demand
                                           * while holding the 'stats[0].lock'.
index d073fff..ea7a807 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/in.h>
 #include <linux/rcupdate.h>
+#include <linux/cpumask.h>
 #include <linux/if_arp.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
@@ -79,17 +80,12 @@ struct sw_flow *ovs_flow_alloc(void)
 {
        struct sw_flow *flow;
        struct flow_stats *stats;
-       int node;
 
-       flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
+       flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);
 
-       flow->sf_acts = NULL;
-       flow->mask = NULL;
-       flow->id.unmasked_key = NULL;
-       flow->id.ufid_len = 0;
-       flow->stats_last_writer = NUMA_NO_NODE;
+       flow->stats_last_writer = -1;
 
        /* Initialize the default stat node. */
        stats = kmem_cache_alloc_node(flow_stats_cache,
@@ -102,10 +98,6 @@ struct sw_flow *ovs_flow_alloc(void)
 
        RCU_INIT_POINTER(flow->stats[0], stats);
 
-       for_each_node(node)
-               if (node != 0)
-                       RCU_INIT_POINTER(flow->stats[node], NULL);
-
        return flow;
 err:
        kmem_cache_free(flow_cache, flow);
@@ -142,16 +134,17 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
 
 static void flow_free(struct sw_flow *flow)
 {
-       int node;
+       int cpu;
 
        if (ovs_identifier_is_key(&flow->id))
                kfree(flow->id.unmasked_key);
        if (flow->sf_acts)
                ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
-       for_each_node(node)
-               if (flow->stats[node])
+       /* We open code this to make sure cpu 0 is always considered */
+       for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask))
+               if (flow->stats[cpu])
                        kmem_cache_free(flow_stats_cache,
-                                       (struct flow_stats __force *)flow->stats[node]);
+                                       (struct flow_stats __force *)flow->stats[cpu]);
        kmem_cache_free(flow_cache, flow);
 }
 
@@ -756,7 +749,7 @@ int ovs_flow_init(void)
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
 
        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
-                                      + (nr_node_ids
+                                      + (nr_cpu_ids
                                          * sizeof(struct flow_stats *)),
                                       0, 0, NULL);
        if (flow_cache == NULL)
index 784c531..13396c7 100644 (file)
@@ -19,6 +19,13 @@ config AF_RXRPC
 
          See Documentation/networking/rxrpc.txt.
 
+config AF_RXRPC_IPV6
+       bool "IPv6 support for RxRPC"
+       depends on (IPV6 = m && AF_RXRPC = m) || (IPV6 = y && AF_RXRPC)
+       help
+         Say Y here to allow AF_RXRPC to use IPv6 UDP as well as IPv4 UDP as
+         its network transport.
+
 
 config AF_RXRPC_DEBUG
        bool "RxRPC dynamic debugging"
index caa226d..09f81be 100644 (file)
@@ -106,19 +106,25 @@ static int rxrpc_validate_address(struct rxrpc_sock *rx,
        case AF_INET:
                if (srx->transport_len < sizeof(struct sockaddr_in))
                        return -EINVAL;
-               _debug("INET: %x @ %pI4",
-                      ntohs(srx->transport.sin.sin_port),
-                      &srx->transport.sin.sin_addr);
                tail = offsetof(struct sockaddr_rxrpc, transport.sin.__pad);
                break;
 
+#ifdef CONFIG_AF_RXRPC_IPV6
        case AF_INET6:
+               if (srx->transport_len < sizeof(struct sockaddr_in6))
+                       return -EINVAL;
+               tail = offsetof(struct sockaddr_rxrpc, transport) +
+                       sizeof(struct sockaddr_in6);
+               break;
+#endif
+
        default:
                return -EAFNOSUPPORT;
        }
 
        if (tail < len)
                memset((void *)srx + tail, 0, len - tail);
+       _debug("INET: %pISp", &srx->transport);
        return 0;
 }
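The tail/memset pair above wipes whatever the caller left between the end of the family-specific address and the length it claimed, so later hashing and whole-structure comparisons of the sockaddr_rxrpc behave deterministically. A minimal sketch of that wipe as a helper (not from the patch; field and type names are those visible above):

	static void rxrpc_zap_address_padding(struct sockaddr_rxrpc *srx,
					      size_t tail, size_t len)
	{
		/* Clear the caller-supplied padding past the real address so
		 * memcmp() and hashes over the whole structure are stable.
		 */
		if (tail < len)
			memset((void *)srx + tail, 0, len - tail);
	}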
 
@@ -299,7 +305,7 @@ void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
 {
        _enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
        rxrpc_release_call(rxrpc_sk(sock->sk), call);
-       rxrpc_put_call(call, rxrpc_call_put);
+       rxrpc_put_call(call, rxrpc_call_put_kernel);
 }
 EXPORT_SYMBOL(rxrpc_kernel_end_call);
 
@@ -401,6 +407,23 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
 
        switch (rx->sk.sk_state) {
        case RXRPC_UNBOUND:
+               rx->srx.srx_family = AF_RXRPC;
+               rx->srx.srx_service = 0;
+               rx->srx.transport_type = SOCK_DGRAM;
+               rx->srx.transport.family = rx->family;
+               switch (rx->family) {
+               case AF_INET:
+                       rx->srx.transport_len = sizeof(struct sockaddr_in);
+                       break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+               case AF_INET6:
+                       rx->srx.transport_len = sizeof(struct sockaddr_in6);
+                       break;
+#endif
+               default:
+                       ret = -EAFNOSUPPORT;
+                       goto error_unlock;
+               }
                local = rxrpc_lookup_local(&rx->srx);
                if (IS_ERR(local)) {
                        ret = PTR_ERR(local);
@@ -551,7 +574,8 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
                return -EAFNOSUPPORT;
 
        /* we support transport protocol UDP/UDP6 only */
-       if (protocol != PF_INET)
+       if (protocol != PF_INET &&
+           !(IS_ENABLED(CONFIG_AF_RXRPC_IPV6) && protocol == PF_INET6))
                return -EPROTONOSUPPORT;
 
        if (sock->type != SOCK_DGRAM)
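The condition above accepts PF_INET always, and PF_INET6 only when CONFIG_AF_RXRPC_IPV6 is built in; anything else, including PF_INET6 on an IPv4-only build, gets -EPROTONOSUPPORT. A quick userspace model of that check (illustrative only; ipv6_enabled stands in for IS_ENABLED(CONFIG_AF_RXRPC_IPV6)):

	#include <stdio.h>

	static int proto_supported(int ipv6_enabled, int is_inet, int is_inet6)
	{
		return is_inet || (ipv6_enabled && is_inet6);
	}

	int main(void)
	{
		printf("v6 off, PF_INET6 -> %s\n",
		       proto_supported(0, 0, 1) ? "accept" : "-EPROTONOSUPPORT");
		printf("v6 on,  PF_INET6 -> %s\n",
		       proto_supported(1, 0, 1) ? "accept" : "-EPROTONOSUPPORT");
		printf("either, PF_INET  -> %s\n",
		       proto_supported(0, 1, 0) ? "accept" : "-EPROTONOSUPPORT");
		return 0;
	}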
index b1cb79e..e78c40b 100644 (file)
@@ -498,6 +498,7 @@ struct rxrpc_call {
         */
 #define RXRPC_RXTX_BUFF_SIZE   64
 #define RXRPC_RXTX_BUFF_MASK   (RXRPC_RXTX_BUFF_SIZE - 1)
+#define RXRPC_INIT_RX_WINDOW_SIZE 32
        struct sk_buff          **rxtx_buffer;
        u8                      *rxtx_annotations;
 #define RXRPC_TX_ANNO_ACK      0
@@ -518,7 +519,7 @@ struct rxrpc_call {
        rxrpc_seq_t             rx_expect_next; /* Expected next packet sequence number */
        u8                      rx_winsize;     /* Size of Rx window */
        u8                      tx_winsize;     /* Maximum size of Tx window */
-       u8                      nr_jumbo_dup;   /* Number of jumbo duplicates */
+       u8                      nr_jumbo_bad;   /* Number of jumbo dups/exceeds-windows */
 
        /* receive-phase ACK management */
        u8                      ackr_reason;    /* reason to ACK */
@@ -540,8 +541,10 @@ enum rxrpc_call_trace {
        rxrpc_call_seen,
        rxrpc_call_got,
        rxrpc_call_got_userid,
+       rxrpc_call_got_kernel,
        rxrpc_call_put,
        rxrpc_call_put_userid,
+       rxrpc_call_put_kernel,
        rxrpc_call_put_noqueue,
        rxrpc_call__nr_trace
 };
index b8acec0..26c293e 100644 (file)
@@ -121,7 +121,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 
                call->user_call_ID = user_call_ID;
                call->notify_rx = notify_rx;
-               rxrpc_get_call(call, rxrpc_call_got);
+               rxrpc_get_call(call, rxrpc_call_got_kernel);
                user_attach_call(call, user_call_ID);
                rxrpc_get_call(call, rxrpc_call_got_userid);
                rb_link_node(&call->sock_node, parent, pp);
@@ -221,6 +221,7 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
                if (rx->discard_new_call) {
                        _debug("discard %lx", call->user_call_ID);
                        rx->discard_new_call(call, call->user_call_ID);
+                       rxrpc_put_call(call, rxrpc_call_put_kernel);
                }
                rxrpc_call_completed(call);
                rxrpc_release_call(rx, call);
@@ -300,6 +301,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
        smp_store_release(&b->call_backlog_tail,
                          (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
 
+       rxrpc_see_call(call);
        call->conn = conn;
        call->peer = rxrpc_get_peer(conn->params.peer);
        return call;
@@ -401,6 +403,13 @@ found_service:
        if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
                rxrpc_notify_socket(call);
 
+       /* We have to discard the prealloc queue's ref here and rely on a
+        * combination of the RCU read lock and refs held either by the socket
+        * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
+        * service to prevent the call from being deallocated too early.
+        */
+       rxrpc_put_call(call, rxrpc_call_put);
+
        _leave(" = %p{%d}", call, call->debug_id);
 out:
        spin_unlock(&rx->incoming_lock);
@@ -425,9 +434,11 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
 
        write_lock(&rx->call_lock);
 
-       ret = -ENODATA;
-       if (list_empty(&rx->to_be_accepted))
-               goto out;
+       if (list_empty(&rx->to_be_accepted)) {
+               write_unlock(&rx->call_lock);
+               kleave(" = -ENODATA [empty]");
+               return ERR_PTR(-ENODATA);
+       }
 
        /* check the user ID isn't already in use */
        pp = &rx->calls.rb_node;
@@ -466,7 +477,6 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
        }
 
        /* formalise the acceptance */
-       rxrpc_get_call(call, rxrpc_call_got);
        call->notify_rx = notify_rx;
        call->user_call_ID = user_call_ID;
        rxrpc_get_call(call, rxrpc_call_got_userid);
index 2b976e7..6143204 100644 (file)
@@ -95,7 +95,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                break;
 
        case RXRPC_ACK_IDLE:
-               if (rxrpc_soft_ack_delay < expiry)
+               if (rxrpc_idle_ack_delay < expiry)
                        expiry = rxrpc_idle_ack_delay;
                break;
 
index 18ab13f..22f9b0d 100644 (file)
@@ -56,8 +56,10 @@ const char rxrpc_call_traces[rxrpc_call__nr_trace][4] = {
        [rxrpc_call_seen]               = "SEE",
        [rxrpc_call_got]                = "GOT",
        [rxrpc_call_got_userid]         = "Gus",
+       [rxrpc_call_got_kernel]         = "Gke",
        [rxrpc_call_put]                = "PUT",
        [rxrpc_call_put_userid]         = "Pus",
+       [rxrpc_call_put_kernel]         = "Pke",
        [rxrpc_call_put_noqueue]        = "PNQ",
 };
 
@@ -150,7 +152,7 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
        memset(&call->sock_node, 0xed, sizeof(call->sock_node));
 
        /* Leave space in the ring to handle a maxed-out jumbo packet */
-       call->rx_winsize = RXRPC_RXTX_BUFF_SIZE - 1 - 46;
+       call->rx_winsize = rxrpc_rx_window_size;
        call->tx_winsize = 16;
        call->rx_expect_next = 1;
        return call;
@@ -462,9 +464,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
                call->rxtx_buffer[i] = NULL;
        }
 
-       /* We have to release the prealloc backlog ref */
-       if (rxrpc_is_service_call(call))
-               rxrpc_put_call(call, rxrpc_call_put);
        _leave("");
 }
 
index ffa9add..bb1f292 100644 (file)
@@ -134,6 +134,16 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
                            srx.transport.sin.sin_addr.s_addr)
                                goto not_found;
                        break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+               case AF_INET6:
+                       if (peer->srx.transport.sin6.sin6_port !=
+                           srx.transport.sin6.sin6_port ||
+                           memcmp(&peer->srx.transport.sin6.sin6_addr,
+                                  &srx.transport.sin6.sin6_addr,
+                                  sizeof(struct in6_addr)) != 0)
+                               goto not_found;
+                       break;
+#endif
                default:
                        BUG();
                }
index afeba98..75af0bd 100644 (file)
@@ -59,6 +59,8 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to)
 
        spin_unlock(&call->lock);
 
+       wake_up(&call->waitq);
+
        while (list) {
                skb = list;
                list = skb->next;
@@ -125,7 +127,7 @@ static bool rxrpc_validate_jumbo(struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        unsigned int offset = sp->offset;
-       unsigned int len = skb->data_len;
+       unsigned int len = skb->len;
        int nr_jumbo = 1;
        u8 flags = sp->hdr.flags;
 
@@ -162,7 +164,7 @@ protocol_error:
  * (that information is encoded in the ACK packet).
  */
 static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
-                                u8 annotation, bool *_jumbo_dup)
+                                u8 annotation, bool *_jumbo_bad)
 {
        /* Discard normal packets that are duplicates. */
        if (annotation == 0)
@@ -172,9 +174,9 @@ static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
         * more partially duplicate jumbo packets, we refuse to take any more
         * jumbos for this call.
         */
-       if (!*_jumbo_dup) {
-               call->nr_jumbo_dup++;
-               *_jumbo_dup = true;
+       if (!*_jumbo_bad) {
+               call->nr_jumbo_bad++;
+               *_jumbo_bad = true;
        }
 }
 
@@ -189,12 +191,12 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
        unsigned int ix;
        rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
        rxrpc_seq_t seq = sp->hdr.seq, hard_ack;
-       bool immediate_ack = false, jumbo_dup = false, queued;
+       bool immediate_ack = false, jumbo_bad = false, queued;
        u16 len;
        u8 ack = 0, flags, annotation = 0;
 
        _enter("{%u,%u},{%u,%u}",
-              call->rx_hard_ack, call->rx_top, skb->data_len, seq);
+              call->rx_hard_ack, call->rx_top, skb->len, seq);
 
        _proto("Rx DATA %%%u { #%u f=%02x }",
               sp->hdr.serial, seq, sp->hdr.flags);
@@ -220,7 +222,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
 
        flags = sp->hdr.flags;
        if (flags & RXRPC_JUMBO_PACKET) {
-               if (call->nr_jumbo_dup > 3) {
+               if (call->nr_jumbo_bad > 3) {
                        ack = RXRPC_ACK_NOSPACE;
                        ack_serial = serial;
                        goto ack;
@@ -231,7 +233,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
 next_subpacket:
        queued = false;
        ix = seq & RXRPC_RXTX_BUFF_MASK;
-       len = skb->data_len;
+       len = skb->len;
        if (flags & RXRPC_JUMBO_PACKET)
                len = RXRPC_JUMBO_DATALEN;
 
@@ -257,7 +259,7 @@ next_subpacket:
        }
 
        if (call->rxtx_buffer[ix]) {
-               rxrpc_input_dup_data(call, seq, annotation, &jumbo_dup);
+               rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
                if (ack != RXRPC_ACK_DUPLICATE) {
                        ack = RXRPC_ACK_DUPLICATE;
                        ack_serial = serial;
@@ -302,6 +304,15 @@ skip:
                annotation++;
                if (flags & RXRPC_JUMBO_PACKET)
                        annotation |= RXRPC_RX_ANNO_JLAST;
+               if (after(seq, hard_ack + call->rx_winsize)) {
+                       ack = RXRPC_ACK_EXCEEDS_WINDOW;
+                       ack_serial = serial;
+                       if (!jumbo_bad) {
+                               call->nr_jumbo_bad++;
+                               jumbo_bad = true;
+                       }
+                       goto ack;
+               }
 
                _proto("Rx DATA Jumbo %%%u", serial);
                goto next_subpacket;
@@ -331,14 +342,16 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_peer *peer;
        unsigned int mtu;
+       u32 rwind = ntohl(ackinfo->rwind);
 
        _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
               sp->hdr.serial,
               ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
-              ntohl(ackinfo->rwind), ntohl(ackinfo->jumbo_max));
+              rwind, ntohl(ackinfo->jumbo_max));
 
-       if (call->tx_winsize > ntohl(ackinfo->rwind))
-               call->tx_winsize = ntohl(ackinfo->rwind);
+       if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+               rwind = RXRPC_RXTX_BUFF_SIZE - 1;
+       call->tx_winsize = rwind;
 
        mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));
 
@@ -442,7 +455,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
        }
 
        offset = sp->offset + nr_acks + 3;
-       if (skb->data_len >= offset + sizeof(buf.info)) {
+       if (skb->len >= offset + sizeof(buf.info)) {
                if (skb_copy_bits(skb, offset, &buf.info, sizeof(buf.info)) < 0)
                        return rxrpc_proto_abort("XAI", call, 0);
                rxrpc_input_ackinfo(call, skb, &buf.info);
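The clamp above caps the peer's advertised receive window at the local Rx/Tx ring capacity (RXRPC_RXTX_BUFF_SIZE - 1, i.e. 63 slots) before it becomes tx_winsize, so an oversized rwind can no longer push the transmit window past the ring. A tiny standalone illustration (the constant is the one defined in ar-internal.h earlier in this series):

	#include <stdio.h>

	#define RXRPC_RXTX_BUFF_SIZE 64	/* from ar-internal.h in this series */

	int main(void)
	{
		unsigned int advertised[] = { 8, 63, 64, 128, 65535 };

		for (unsigned int i = 0;
		     i < sizeof(advertised) / sizeof(advertised[0]); i++) {
			unsigned int rwind = advertised[i];

			if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
				rwind = RXRPC_RXTX_BUFF_SIZE - 1;
			printf("peer rwind %5u -> tx_winsize %2u\n",
			       advertised[i], rwind);
		}
		return 0;
	}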
index cdd58e6..f073e93 100644 (file)
@@ -15,8 +15,6 @@
 #include <linux/net.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
-#include <linux/udp.h>
-#include <linux/ip.h>
 #include <net/sock.h>
 #include <net/af_rxrpc.h>
 #include <generated/utsrelease.h>
@@ -33,7 +31,7 @@ static void rxrpc_send_version_request(struct rxrpc_local *local,
 {
        struct rxrpc_wire_header whdr;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-       struct sockaddr_in sin;
+       struct sockaddr_rxrpc srx;
        struct msghdr msg;
        struct kvec iov[2];
        size_t len;
@@ -41,12 +39,11 @@ static void rxrpc_send_version_request(struct rxrpc_local *local,
 
        _enter("");
 
-       sin.sin_family = AF_INET;
-       sin.sin_port = udp_hdr(skb)->source;
-       sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
+       if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
+               return;
 
-       msg.msg_name    = &sin;
-       msg.msg_namelen = sizeof(sin);
+       msg.msg_name    = &srx.transport;
+       msg.msg_namelen = srx.transport_len;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags   = 0;
index 782b9ad..e3fad80 100644 (file)
@@ -58,6 +58,17 @@ static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
                        memcmp(&local->srx.transport.sin.sin_addr,
                               &srx->transport.sin.sin_addr,
                               sizeof(struct in_addr));
+#ifdef CONFIG_AF_RXRPC_IPV6
+       case AF_INET6:
+               /* If the choice of UDP6 port is left up to the transport, then
+                * the endpoint record doesn't match.
+                */
+               return ((u16 __force)local->srx.transport.sin6.sin6_port -
+                       (u16 __force)srx->transport.sin6.sin6_port) ?:
+                       memcmp(&local->srx.transport.sin6.sin6_addr,
+                              &srx->transport.sin6.sin6_addr,
+                              sizeof(struct in6_addr));
+#endif
        default:
                BUG();
        }
@@ -100,11 +111,12 @@ static int rxrpc_open_socket(struct rxrpc_local *local)
        struct sock *sock;
        int ret, opt;
 
-       _enter("%p{%d}", local, local->srx.transport_type);
+       _enter("%p{%d,%d}",
+              local, local->srx.transport_type, local->srx.transport.family);
 
        /* create a socket to represent the local endpoint */
-       ret = sock_create_kern(&init_net, PF_INET, local->srx.transport_type,
-                              IPPROTO_UDP, &local->socket);
+       ret = sock_create_kern(&init_net, local->srx.transport.family,
+                              local->srx.transport_type, 0, &local->socket);
        if (ret < 0) {
                _leave(" = %d [socket]", ret);
                return ret;
@@ -169,18 +181,8 @@ struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
        long diff;
        int ret;
 
-       if (srx->transport.family == AF_INET) {
-               _enter("{%d,%u,%pI4+%hu}",
-                      srx->transport_type,
-                      srx->transport.family,
-                      &srx->transport.sin.sin_addr,
-                      ntohs(srx->transport.sin.sin_port));
-       } else {
-               _enter("{%d,%u}",
-                      srx->transport_type,
-                      srx->transport.family);
-               return ERR_PTR(-EAFNOSUPPORT);
-       }
+       _enter("{%d,%d,%pISp}",
+              srx->transport_type, srx->transport.family, &srx->transport);
 
        mutex_lock(&rxrpc_local_mutex);
 
@@ -233,13 +235,8 @@ struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
 found:
        mutex_unlock(&rxrpc_local_mutex);
 
-       _net("LOCAL %s %d {%d,%u,%pI4+%hu}",
-            age,
-            local->debug_id,
-            local->srx.transport_type,
-            local->srx.transport.family,
-            &local->srx.transport.sin.sin_addr,
-            ntohs(local->srx.transport.sin.sin_port));
+       _net("LOCAL %s %d {%pISp}",
+            age, local->debug_id, &local->srx.transport);
 
        _leave(" = %p", local);
        return local;
index fd096f7..8b91078 100644 (file)
@@ -50,7 +50,10 @@ unsigned int rxrpc_idle_ack_delay = 0.5 * HZ;
  * limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further
  * packets.
  */
-unsigned int rxrpc_rx_window_size = RXRPC_RXTX_BUFF_SIZE - 46;
+unsigned int rxrpc_rx_window_size = RXRPC_INIT_RX_WINDOW_SIZE;
+#if (RXRPC_RXTX_BUFF_SIZE - 1) < RXRPC_INIT_RX_WINDOW_SIZE
+#error Need to reduce RXRPC_INIT_RX_WINDOW_SIZE
+#endif
 
 /*
  * Maximum Rx MTU size.  This indicates to the sender the size of jumbo packet
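The #if/#error pair above is a file-scope compile-time guard: both values are plain macros, so the preprocessor can verify that the initial Rx window fits in the ring. As a sketch, the same guard could be written as a static assertion instead (not in the patch):

	_Static_assert(RXRPC_INIT_RX_WINDOW_SIZE <= RXRPC_RXTX_BUFF_SIZE - 1,
		       "Need to reduce RXRPC_INIT_RX_WINDOW_SIZE");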
index 719a4c2..06a9aca 100644 (file)
@@ -15,8 +15,6 @@
 #include <linux/gfp.h>
 #include <linux/skbuff.h>
 #include <linux/export.h>
-#include <linux/udp.h>
-#include <linux/ip.h>
 #include <net/sock.h>
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
@@ -71,10 +69,10 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_call *call,
 
        mtu = call->conn->params.peer->if_mtu;
        mtu -= call->conn->params.peer->hdrsize;
-       jmax = (call->nr_jumbo_dup > 3) ? 1 : rxrpc_rx_jumbo_max;
+       jmax = (call->nr_jumbo_bad > 3) ? 1 : rxrpc_rx_jumbo_max;
        pkt->ackinfo.rxMTU      = htonl(rxrpc_rx_mtu);
        pkt->ackinfo.maxMTU     = htonl(mtu);
-       pkt->ackinfo.rwind      = htonl(rxrpc_rx_window_size);
+       pkt->ackinfo.rwind      = htonl(call->rx_winsize);
        pkt->ackinfo.jumbo_max  = htonl(jmax);
 
        *ackp++ = 0;
@@ -260,6 +258,24 @@ send_fragmentable:
                                          (char *)&opt, sizeof(opt));
                }
                break;
+
+#ifdef CONFIG_AF_RXRPC_IPV6
+       case AF_INET6:
+               opt = IPV6_PMTUDISC_DONT;
+               ret = kernel_setsockopt(conn->params.local->socket,
+                                       SOL_IPV6, IPV6_MTU_DISCOVER,
+                                       (char *)&opt, sizeof(opt));
+               if (ret == 0) {
+                       ret = kernel_sendmsg(conn->params.local->socket, &msg,
+                                            iov, 1, iov[0].iov_len);
+
+                       opt = IPV6_PMTUDISC_DO;
+                       kernel_setsockopt(conn->params.local->socket,
+                                         SOL_IPV6, IPV6_MTU_DISCOVER,
+                                         (char *)&opt, sizeof(opt));
+               }
+               break;
+#endif
        }
 
        up_write(&conn->params.local->defrag_sem);
@@ -272,10 +288,7 @@ send_fragmentable:
  */
 void rxrpc_reject_packets(struct rxrpc_local *local)
 {
-       union {
-               struct sockaddr sa;
-               struct sockaddr_in sin;
-       } sa;
+       struct sockaddr_rxrpc srx;
        struct rxrpc_skb_priv *sp;
        struct rxrpc_wire_header whdr;
        struct sk_buff *skb;
@@ -292,32 +305,21 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
        iov[1].iov_len = sizeof(code);
        size = sizeof(whdr) + sizeof(code);
 
-       msg.msg_name = &sa;
+       msg.msg_name = &srx.transport;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = 0;
 
-       memset(&sa, 0, sizeof(sa));
-       sa.sa.sa_family = local->srx.transport.family;
-       switch (sa.sa.sa_family) {
-       case AF_INET:
-               msg.msg_namelen = sizeof(sa.sin);
-               break;
-       default:
-               msg.msg_namelen = 0;
-               break;
-       }
-
        memset(&whdr, 0, sizeof(whdr));
        whdr.type = RXRPC_PACKET_TYPE_ABORT;
 
        while ((skb = skb_dequeue(&local->reject_queue))) {
                rxrpc_see_skb(skb);
                sp = rxrpc_skb(skb);
-               switch (sa.sa.sa_family) {
-               case AF_INET:
-                       sa.sin.sin_port = udp_hdr(skb)->source;
-                       sa.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
+
+               if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) {
+                       msg.msg_namelen = srx.transport_len;
+
                        code = htonl(skb->priority);
 
                        whdr.epoch      = htonl(sp->hdr.epoch);
@@ -329,10 +331,6 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
                        whdr.flags      &= RXRPC_CLIENT_INITIATED;
 
                        kernel_sendmsg(local->socket, &msg, iov, 2, size);
-                       break;
-
-               default:
-                       break;
                }
 
                rxrpc_free_skb(skb);
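For IPv6, the fragmentable-send path added above mirrors the existing IPv4 logic: temporarily set IPV6_PMTUDISC_DONT so this one oversized datagram may be fragmented, send it, then restore IPV6_PMTUDISC_DO so normal traffic keeps honouring the discovered path MTU. A condensed sketch of that toggle (illustrative only; the caller is assumed to have filled in msg and iov, and error handling is trimmed):

	static int rxrpc_send_fragmentable_ipv6(struct socket *sock,
						struct msghdr *msg,
						struct kvec *iov, size_t len)
	{
		int opt, ret;

		/* Allow fragmentation for this one send. */
		opt = IPV6_PMTUDISC_DONT;
		ret = kernel_setsockopt(sock, SOL_IPV6, IPV6_MTU_DISCOVER,
					(char *)&opt, sizeof(opt));
		if (ret < 0)
			return ret;

		ret = kernel_sendmsg(sock, msg, iov, 1, len);

		/* Restore path-MTU discovery for subsequent traffic. */
		opt = IPV6_PMTUDISC_DO;
		kernel_setsockopt(sock, SOL_IPV6, IPV6_MTU_DISCOVER,
				  (char *)&opt, sizeof(opt));
		return ret;
	}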
index c894893..9e0725f 100644 (file)
@@ -66,6 +66,32 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
                }
                break;
 
+#ifdef CONFIG_AF_RXRPC_IPV6
+       case AF_INET6:
+               srx.transport.sin6.sin6_port = serr->port;
+               srx.transport_len = sizeof(struct sockaddr_in6);
+               switch (serr->ee.ee_origin) {
+               case SO_EE_ORIGIN_ICMP6:
+                       _net("Rx ICMP6");
+                       memcpy(&srx.transport.sin6.sin6_addr,
+                              skb_network_header(skb) + serr->addr_offset,
+                              sizeof(struct in6_addr));
+                       break;
+               case SO_EE_ORIGIN_ICMP:
+                       _net("Rx ICMP on v6 sock");
+                       memcpy(srx.transport.sin6.sin6_addr.s6_addr + 12,
+                              skb_network_header(skb) + serr->addr_offset,
+                              sizeof(struct in_addr));
+                       break;
+               default:
+                       memcpy(&srx.transport.sin6.sin6_addr,
+                              &ipv6_hdr(skb)->saddr,
+                              sizeof(struct in6_addr));
+                       break;
+               }
+               break;
+#endif
+
        default:
                BUG();
        }
index 2efe29a..f3e5766 100644 (file)
 #include <linux/skbuff.h>
 #include <linux/udp.h>
 #include <linux/in.h>
+#include <linux/in6.h>
 #include <linux/slab.h>
 #include <linux/hashtable.h>
 #include <net/sock.h>
 #include <net/af_rxrpc.h>
 #include <net/ip.h>
 #include <net/route.h>
+#include <net/ip6_route.h>
 #include "ar-internal.h"
 
 static DEFINE_HASHTABLE(rxrpc_peer_hash, 10);
@@ -50,6 +52,13 @@ static unsigned long rxrpc_peer_hash_key(struct rxrpc_local *local,
                size = sizeof(srx->transport.sin.sin_addr);
                p = (u16 *)&srx->transport.sin.sin_addr;
                break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+       case AF_INET6:
+               hash_key += (u16 __force)srx->transport.sin6.sin6_port;
+               size = sizeof(srx->transport.sin6.sin6_addr);
+               p = (u16 *)&srx->transport.sin6.sin6_addr;
+               break;
+#endif
        default:
                WARN(1, "AF_RXRPC: Unsupported transport address family\n");
                return 0;
@@ -93,6 +102,14 @@ static long rxrpc_peer_cmp_key(const struct rxrpc_peer *peer,
                        memcmp(&peer->srx.transport.sin.sin_addr,
                               &srx->transport.sin.sin_addr,
                               sizeof(struct in_addr));
+#ifdef CONFIG_AF_RXRPC_IPV6
+       case AF_INET6:
+               return ((u16 __force)peer->srx.transport.sin6.sin6_port -
+                       (u16 __force)srx->transport.sin6.sin6_port) ?:
+                       memcmp(&peer->srx.transport.sin6.sin6_addr,
+                              &srx->transport.sin6.sin6_addr,
+                              sizeof(struct in6_addr));
+#endif
        default:
                BUG();
        }
@@ -130,17 +147,7 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
 
        peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
        if (peer) {
-               switch (srx->transport.family) {
-               case AF_INET:
-                       _net("PEER %d {%d,%u,%pI4+%hu}",
-                            peer->debug_id,
-                            peer->srx.transport_type,
-                            peer->srx.transport.family,
-                            &peer->srx.transport.sin.sin_addr,
-                            ntohs(peer->srx.transport.sin.sin_port));
-                       break;
-               }
-
+               _net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);
                _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
        }
        return peer;
@@ -152,22 +159,53 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
  */
 static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
 {
+       struct dst_entry *dst;
        struct rtable *rt;
-       struct flowi4 fl4;
+       struct flowi fl;
+       struct flowi4 *fl4 = &fl.u.ip4;
+#ifdef CONFIG_AF_RXRPC_IPV6
+       struct flowi6 *fl6 = &fl.u.ip6;
+#endif
 
        peer->if_mtu = 1500;
 
-       rt = ip_route_output_ports(&init_net, &fl4, NULL,
-                                  peer->srx.transport.sin.sin_addr.s_addr, 0,
-                                  htons(7000), htons(7001),
-                                  IPPROTO_UDP, 0, 0);
-       if (IS_ERR(rt)) {
-               _leave(" [route err %ld]", PTR_ERR(rt));
-               return;
+       memset(&fl, 0, sizeof(fl));
+       switch (peer->srx.transport.family) {
+       case AF_INET:
+               rt = ip_route_output_ports(
+                       &init_net, fl4, NULL,
+                       peer->srx.transport.sin.sin_addr.s_addr, 0,
+                       htons(7000), htons(7001), IPPROTO_UDP, 0, 0);
+               if (IS_ERR(rt)) {
+                       _leave(" [route err %ld]", PTR_ERR(rt));
+                       return;
+               }
+               dst = &rt->dst;
+               break;
+
+#ifdef CONFIG_AF_RXRPC_IPV6
+       case AF_INET6:
+               fl6->flowi6_iif = LOOPBACK_IFINDEX;
+               fl6->flowi6_scope = RT_SCOPE_UNIVERSE;
+               fl6->flowi6_proto = IPPROTO_UDP;
+               memcpy(&fl6->daddr, &peer->srx.transport.sin6.sin6_addr,
+                      sizeof(struct in6_addr));
+               fl6->fl6_dport = htons(7001);
+               fl6->fl6_sport = htons(7000);
+               dst = ip6_route_output(&init_net, NULL, fl6);
+               if (IS_ERR(dst)) {
+                       _leave(" [route err %ld]", PTR_ERR(dst));
+                       return;
+               }
+               break;
+#endif
+
+       default:
+               BUG();
        }
 
-       peer->if_mtu = dst_mtu(&rt->dst);
-       dst_release(&rt->dst);
+       peer->if_mtu = dst_mtu(dst);
+       dst_release(dst);
 
        _leave(" [if_mtu %u]", peer->if_mtu);
 }
@@ -203,20 +241,28 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
  */
 static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key)
 {
+       peer->hash_key = hash_key;
        rxrpc_assess_MTU_size(peer);
        peer->mtu = peer->if_mtu;
 
-       if (peer->srx.transport.family == AF_INET) {
+       switch (peer->srx.transport.family) {
+       case AF_INET:
                peer->hdrsize = sizeof(struct iphdr);
-               switch (peer->srx.transport_type) {
-               case SOCK_DGRAM:
-                       peer->hdrsize += sizeof(struct udphdr);
-                       break;
-               default:
-                       BUG();
-                       break;
-               }
-       } else {
+               break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+       case AF_INET6:
+               peer->hdrsize = sizeof(struct ipv6hdr);
+               break;
+#endif
+       default:
+               BUG();
+       }
+
+       switch (peer->srx.transport_type) {
+       case SOCK_DGRAM:
+               peer->hdrsize += sizeof(struct udphdr);
+               break;
+       default:
                BUG();
        }
 
@@ -238,7 +284,6 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
 
        peer = rxrpc_alloc_peer(local, gfp);
        if (peer) {
-               peer->hash_key = hash_key;
                memcpy(&peer->srx, srx, sizeof(*srx));
                rxrpc_init_peer(peer, hash_key);
        }
@@ -285,11 +330,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
        struct rxrpc_peer *peer, *candidate;
        unsigned long hash_key = rxrpc_peer_hash_key(local, srx);
 
-       _enter("{%d,%d,%pI4+%hu}",
-              srx->transport_type,
-              srx->transport_len,
-              &srx->transport.sin.sin_addr,
-              ntohs(srx->transport.sin.sin_port));
+       _enter("{%pISp}", &srx->transport);
 
        /* search the peer list first */
        rcu_read_lock();
@@ -326,11 +367,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
                        peer = candidate;
        }
 
-       _net("PEER %d {%d,%pI4+%hu}",
-            peer->debug_id,
-            peer->srx.transport_type,
-            &peer->srx.transport.sin.sin_addr,
-            ntohs(peer->srx.transport.sin.sin_port));
+       _net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);
 
        _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
        return peer;
index d529d1b..65cd980 100644 (file)
@@ -52,11 +52,12 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
        struct rxrpc_sock *rx;
        struct rxrpc_peer *peer;
        struct rxrpc_call *call;
-       char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
+       char lbuff[50], rbuff[50];
 
        if (v == &rxrpc_calls) {
                seq_puts(seq,
-                        "Proto Local                  Remote                "
+                        "Proto Local                                          "
+                        " Remote                                         "
                         " SvID ConnID   CallID   End Use State    Abort   "
                         " UserID\n");
                return 0;
@@ -68,9 +69,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
        if (rx) {
                local = READ_ONCE(rx->local);
                if (local)
-                       sprintf(lbuff, "%pI4:%u",
-                               &local->srx.transport.sin.sin_addr,
-                               ntohs(local->srx.transport.sin.sin_port));
+                       sprintf(lbuff, "%pISpc", &local->srx.transport);
                else
                        strcpy(lbuff, "no_local");
        } else {
@@ -79,14 +78,12 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
 
        peer = call->peer;
        if (peer)
-               sprintf(rbuff, "%pI4:%u",
-                       &peer->srx.transport.sin.sin_addr,
-                       ntohs(peer->srx.transport.sin.sin_port));
+               sprintf(rbuff, "%pISpc", &peer->srx.transport);
        else
                strcpy(rbuff, "no_connection");
 
        seq_printf(seq,
-                  "UDP   %-22.22s %-22.22s %4x %08x %08x %s %3u"
+                  "UDP   %-47.47s %-47.47s %4x %08x %08x %s %3u"
                   " %-8.8s %08x %lx\n",
                   lbuff,
                   rbuff,
@@ -145,11 +142,12 @@ static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
 static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
 {
        struct rxrpc_connection *conn;
-       char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
+       char lbuff[50], rbuff[50];
 
        if (v == &rxrpc_connection_proc_list) {
                seq_puts(seq,
-                        "Proto Local                  Remote                "
+                        "Proto Local                                          "
+                        " Remote                                         "
                         " SvID ConnID   End Use State    Key     "
                         " Serial   ISerial\n"
                         );
@@ -163,16 +161,12 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
                goto print;
        }
 
-       sprintf(lbuff, "%pI4:%u",
-               &conn->params.local->srx.transport.sin.sin_addr,
-               ntohs(conn->params.local->srx.transport.sin.sin_port));
+       sprintf(lbuff, "%pISpc", &conn->params.local->srx.transport);
 
-       sprintf(rbuff, "%pI4:%u",
-               &conn->params.peer->srx.transport.sin.sin_addr,
-               ntohs(conn->params.peer->srx.transport.sin.sin_port));
+       sprintf(rbuff, "%pISpc", &conn->params.peer->srx.transport);
 print:
        seq_printf(seq,
-                  "UDP   %-22.22s %-22.22s %4x %08x %s %3u"
+                  "UDP   %-47.47s %-47.47s %4x %08x %s %3u"
                   " %s %08x %08x %08x\n",
                   lbuff,
                   rbuff,
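The proc columns widen from 22 to 47 characters because %pISpc prints a struct sockaddr of either family: S takes a sockaddr, p appends the port and c compresses IPv6, so the worst case is a bracketed IPv6 address plus port. A minimal usage sketch (illustrative; the buffer is hypothetical, the address is the one used above):

	char rbuff[50];

	/* e.g. "192.168.0.1:7001" or "[2001:db8::1]:7001" */
	scnprintf(rbuff, sizeof(rbuff), "%pISpc", &peer->srx.transport);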
index 20d0b5c..a284205 100644 (file)
@@ -118,6 +118,7 @@ static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx,
                list_del_init(&call->recvmsg_link);
                write_unlock_bh(&rx->recvmsg_lock);
 
+               rxrpc_get_call(call, rxrpc_call_got);
                write_lock(&rx->call_lock);
                list_add_tail(&call->accept_link, &rx->to_be_accepted);
                write_unlock(&rx->call_lock);
@@ -463,6 +464,10 @@ try_again:
                                         flags, &copied);
                if (ret == -EAGAIN)
                        ret = 0;
+
+               if (after(call->rx_top, call->rx_hard_ack) &&
+                   call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK])
+                       rxrpc_notify_socket(call);
                break;
        default:
                ret = 0;
index b7ca8cf..a03c61c 100644 (file)
@@ -20,7 +20,7 @@ static const unsigned int one = 1;
 static const unsigned int four = 4;
 static const unsigned int thirtytwo = 32;
 static const unsigned int n_65535 = 65535;
-static const unsigned int n_max_acks = RXRPC_MAXACKS;
+static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
 
 /*
  * RxRPC operating parameters.
index b88914d..ff7af71 100644 (file)
@@ -30,6 +30,7 @@ int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *srx, struct sk_buff *skb)
                srx->transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
                return 0;
 
+#ifdef CONFIG_AF_RXRPC_IPV6
        case ETH_P_IPV6:
                srx->transport_type = SOCK_DGRAM;
                srx->transport_len = sizeof(srx->transport.sin6);
@@ -37,6 +38,7 @@ int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *srx, struct sk_buff *skb)
                srx->transport.sin6.sin6_port = udp_hdr(skb)->source;
                srx->transport.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
                return 0;
+#endif
 
        default:
                pr_warn_ratelimited("AF_RXRPC: Unknown eth protocol %u\n",
index 72e3426..7795d5a 100644 (file)
@@ -749,6 +749,17 @@ config NET_ACT_CONNMARK
          To compile this code as a module, choose M here: the
          module will be called act_connmark.
 
+config NET_ACT_SKBMOD
+        tristate "skb data modification action"
+        depends on NET_CLS_ACT
+        ---help---
+         Say Y here to allow modification of skb data, such as the Ethernet
+         header's MAC addresses and ethertype.
+
+         If unsure, say N.
+
+         To compile this code as a module, choose M here: the
+         module will be called act_skbmod.
+
 config NET_ACT_IFE
         tristate "Inter-FE action based on IETF ForCES InterFE LFB"
         depends on NET_CLS_ACT
index b9d046b..148ae0d 100644 (file)
@@ -19,6 +19,7 @@ obj-$(CONFIG_NET_ACT_CSUM)    += act_csum.o
 obj-$(CONFIG_NET_ACT_VLAN)     += act_vlan.o
 obj-$(CONFIG_NET_ACT_BPF)      += act_bpf.o
 obj-$(CONFIG_NET_ACT_CONNMARK) += act_connmark.o
+obj-$(CONFIG_NET_ACT_SKBMOD)   += act_skbmod.o
 obj-$(CONFIG_NET_ACT_IFE)      += act_ife.o
 obj-$(CONFIG_NET_IFE_SKBMARK)  += act_meta_mark.o
 obj-$(CONFIG_NET_IFE_SKBPRIO)  += act_meta_skbprio.o
index bfa8707..1d39600 100644 (file)
@@ -39,13 +39,10 @@ static struct tc_action_ops act_bpf_ops;
 static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
                   struct tcf_result *res)
 {
+       bool at_ingress = skb_at_tc_ingress(skb);
        struct tcf_bpf *prog = to_bpf(act);
        struct bpf_prog *filter;
        int action, filter_res;
-       bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;
-
-       if (unlikely(!skb_mac_header_was_set(skb)))
-               return TC_ACT_UNSPEC;
 
        tcf_lastuse_update(&prog->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
new file mode 100644 (file)
index 0000000..e7d9638
--- /dev/null
@@ -0,0 +1,301 @@
+/*
+ * net/sched/act_skbmod.c  skb data modifier
+ *
+ * Copyright (c) 2016 Jamal Hadi Salim <jhs@mojatatu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+
+#include <linux/tc_act/tc_skbmod.h>
+#include <net/tc_act/tc_skbmod.h>
+
+#define SKBMOD_TAB_MASK     15
+
+static int skbmod_net_id;
+static struct tc_action_ops act_skbmod_ops;
+
+#define MAX_EDIT_LEN ETH_HLEN
+static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a,
+                         struct tcf_result *res)
+{
+       struct tcf_skbmod *d = to_skbmod(a);
+       int action;
+       struct tcf_skbmod_params *p;
+       u64 flags;
+       int err;
+
+       tcf_lastuse_update(&d->tcf_tm);
+       bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
+
+       /* XXX: if you are going to edit more fields beyond the Ethernet
+        * header (for example, IP header replacement or VLAN swap), then
+        * MAX_EDIT_LEN needs to change appropriately.
+        */
+       err = skb_ensure_writable(skb, MAX_EDIT_LEN);
+       if (unlikely(err)) { /* best policy is to drop on the floor */
+               qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
+               return TC_ACT_SHOT;
+       }
+
+       rcu_read_lock();
+       action = READ_ONCE(d->tcf_action);
+       if (unlikely(action == TC_ACT_SHOT)) {
+               qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
+               rcu_read_unlock();
+               return action;
+       }
+
+       p = rcu_dereference(d->skbmod_p);
+       flags = p->flags;
+       if (flags & SKBMOD_F_DMAC)
+               ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst);
+       if (flags & SKBMOD_F_SMAC)
+               ether_addr_copy(eth_hdr(skb)->h_source, p->eth_src);
+       if (flags & SKBMOD_F_ETYPE)
+               eth_hdr(skb)->h_proto = p->eth_type;
+       rcu_read_unlock();
+
+       if (flags & SKBMOD_F_SWAPMAC) {
+               u16 tmpaddr[ETH_ALEN / 2]; /* ether_addr_copy() requirement */
+               /* XXX: I am sure we can come up with more efficient swapping */
+               ether_addr_copy((u8 *)tmpaddr, eth_hdr(skb)->h_dest);
+               ether_addr_copy(eth_hdr(skb)->h_dest, eth_hdr(skb)->h_source);
+               ether_addr_copy(eth_hdr(skb)->h_source, (u8 *)tmpaddr);
+       }
+
+       return action;
+}
+
+static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
+       [TCA_SKBMOD_PARMS]              = { .len = sizeof(struct tc_skbmod) },
+       [TCA_SKBMOD_DMAC]               = { .len = ETH_ALEN },
+       [TCA_SKBMOD_SMAC]               = { .len = ETH_ALEN },
+       [TCA_SKBMOD_ETYPE]              = { .type = NLA_U16 },
+};
+
+static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
+                          struct nlattr *est, struct tc_action **a,
+                          int ovr, int bind)
+{
+       struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+       struct nlattr *tb[TCA_SKBMOD_MAX + 1];
+       struct tcf_skbmod_params *p, *p_old;
+       struct tc_skbmod *parm;
+       struct tcf_skbmod *d;
+       bool exists = false;
+       u8 *daddr = NULL;
+       u8 *saddr = NULL;
+       u16 eth_type = 0;
+       u32 lflags = 0;
+       int ret = 0, err;
+
+       if (!nla)
+               return -EINVAL;
+
+       err = nla_parse_nested(tb, TCA_SKBMOD_MAX, nla, skbmod_policy);
+       if (err < 0)
+               return err;
+
+       if (!tb[TCA_SKBMOD_PARMS])
+               return -EINVAL;
+
+       if (tb[TCA_SKBMOD_DMAC]) {
+               daddr = nla_data(tb[TCA_SKBMOD_DMAC]);
+               lflags |= SKBMOD_F_DMAC;
+       }
+
+       if (tb[TCA_SKBMOD_SMAC]) {
+               saddr = nla_data(tb[TCA_SKBMOD_SMAC]);
+               lflags |= SKBMOD_F_SMAC;
+       }
+
+       if (tb[TCA_SKBMOD_ETYPE]) {
+               eth_type = nla_get_u16(tb[TCA_SKBMOD_ETYPE]);
+               lflags |= SKBMOD_F_ETYPE;
+       }
+
+       parm = nla_data(tb[TCA_SKBMOD_PARMS]);
+       if (parm->flags & SKBMOD_F_SWAPMAC)
+               lflags = SKBMOD_F_SWAPMAC;
+
+       exists = tcf_hash_check(tn, parm->index, a, bind);
+       if (exists && bind)
+               return 0;
+
+       if (!lflags)
+               return -EINVAL;
+
+       if (!exists) {
+               ret = tcf_hash_create(tn, parm->index, est, a,
+                                     &act_skbmod_ops, bind, true);
+               if (ret)
+                       return ret;
+
+               ret = ACT_P_CREATED;
+       } else {
+               tcf_hash_release(*a, bind);
+               if (!ovr)
+                       return -EEXIST;
+       }
+
+       d = to_skbmod(*a);
+
+       ASSERT_RTNL();
+       p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
+       if (unlikely(!p)) {
+               if (ovr)
+                       tcf_hash_release(*a, bind);
+               return -ENOMEM;
+       }
+
+       p->flags = lflags;
+       d->tcf_action = parm->action;
+
+       p_old = rtnl_dereference(d->skbmod_p);
+
+       if (ovr)
+               spin_lock_bh(&d->tcf_lock);
+
+       if (lflags & SKBMOD_F_DMAC)
+               ether_addr_copy(p->eth_dst, daddr);
+       if (lflags & SKBMOD_F_SMAC)
+               ether_addr_copy(p->eth_src, saddr);
+       if (lflags & SKBMOD_F_ETYPE)
+               p->eth_type = htons(eth_type);
+
+       rcu_assign_pointer(d->skbmod_p, p);
+       if (ovr)
+               spin_unlock_bh(&d->tcf_lock);
+
+       if (p_old)
+               kfree_rcu(p_old, rcu);
+
+       if (ret == ACT_P_CREATED)
+               tcf_hash_insert(tn, *a);
+       return ret;
+}
+
+static void tcf_skbmod_cleanup(struct tc_action *a, int bind)
+{
+       struct tcf_skbmod *d = to_skbmod(a);
+       struct tcf_skbmod_params  *p;
+
+       p = rcu_dereference_protected(d->skbmod_p, 1);
+       kfree_rcu(p, rcu);
+}
+
+static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
+                          int bind, int ref)
+{
+       struct tcf_skbmod *d = to_skbmod(a);
+       unsigned char *b = skb_tail_pointer(skb);
+       struct tcf_skbmod_params  *p = rtnl_dereference(d->skbmod_p);
+       struct tc_skbmod opt = {
+               .index   = d->tcf_index,
+               .refcnt  = d->tcf_refcnt - ref,
+               .bindcnt = d->tcf_bindcnt - bind,
+               .action  = d->tcf_action,
+       };
+       struct tcf_t t;
+
+       opt.flags  = p->flags;
+       if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt))
+               goto nla_put_failure;
+       if ((p->flags & SKBMOD_F_DMAC) &&
+           nla_put(skb, TCA_SKBMOD_DMAC, ETH_ALEN, p->eth_dst))
+               goto nla_put_failure;
+       if ((p->flags & SKBMOD_F_SMAC) &&
+           nla_put(skb, TCA_SKBMOD_SMAC, ETH_ALEN, p->eth_src))
+               goto nla_put_failure;
+       if ((p->flags & SKBMOD_F_ETYPE) &&
+           nla_put_u16(skb, TCA_SKBMOD_ETYPE, ntohs(p->eth_type)))
+               goto nla_put_failure;
+
+       tcf_tm_dump(&t, &d->tcf_tm);
+       if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
+               goto nla_put_failure;
+
+       return skb->len;
+nla_put_failure:
+       nlmsg_trim(skb, b);
+       return -1;
+}
+
+static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb,
+                            struct netlink_callback *cb, int type,
+                            const struct tc_action_ops *ops)
+{
+       struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+
+       return tcf_generic_walker(tn, skb, cb, type, ops);
+}
+
+static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+
+       return tcf_hash_search(tn, a, index);
+}
+
+static struct tc_action_ops act_skbmod_ops = {
+       .kind           =       "skbmod",
+       .type           =       TCA_ACT_SKBMOD,
+       .owner          =       THIS_MODULE,
+       .act            =       tcf_skbmod_run,
+       .dump           =       tcf_skbmod_dump,
+       .init           =       tcf_skbmod_init,
+       .cleanup        =       tcf_skbmod_cleanup,
+       .walk           =       tcf_skbmod_walker,
+       .lookup         =       tcf_skbmod_search,
+       .size           =       sizeof(struct tcf_skbmod),
+};
+
+static __net_init int skbmod_init_net(struct net *net)
+{
+       struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+
+       return tc_action_net_init(tn, &act_skbmod_ops, SKBMOD_TAB_MASK);
+}
+
+static void __net_exit skbmod_exit_net(struct net *net)
+{
+       struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+
+       tc_action_net_exit(tn);
+}
+
+static struct pernet_operations skbmod_net_ops = {
+       .init = skbmod_init_net,
+       .exit = skbmod_exit_net,
+       .id   = &skbmod_net_id,
+       .size = sizeof(struct tc_action_net),
+};
+
+MODULE_AUTHOR("Jamal Hadi Salim, <jhs@mojatatu.com>");
+MODULE_DESCRIPTION("SKB data mod-ing");
+MODULE_LICENSE("GPL");
+
+static int __init skbmod_init_module(void)
+{
+       return tcf_register_action(&act_skbmod_ops, &skbmod_net_ops);
+}
+
+static void __exit skbmod_cleanup_module(void)
+{
+       tcf_unregister_action(&act_skbmod_ops, &skbmod_net_ops);
+}
+
+module_init(skbmod_init_module);
+module_exit(skbmod_cleanup_module);
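The skbmod action added above rewrites Ethernet header fields (destination MAC, source MAC, ethertype, or a MAC swap) on packets selected by a classifier. A minimal sketch of how it might be driven from user space, assuming an iproute2 build whose tc front end exposes the action with a "skbmod set ..." grammar; the exact command syntax is an assumption, not something this patch defines:

	# hedged example: rewrite destination and source MAC on ingress traffic
	tc qdisc add dev eth0 clsact
	tc filter add dev eth0 ingress protocol ip prio 10 u32 match u32 0 0 \
		action skbmod set dmac 02:12:13:14:15:16 set smac 02:aa:bb:cc:dd:ee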
index dceff74..af47bdf 100644 (file)
@@ -194,15 +194,12 @@ static void tunnel_key_release(struct tc_action *a, int bind)
        struct tcf_tunnel_key *t = to_tunnel_key(a);
        struct tcf_tunnel_key_params *params;
 
-       rcu_read_lock();
-       params = rcu_dereference(t->params);
+       params = rcu_dereference_protected(t->params, 1);
 
        if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
                dst_release(&params->tcft_enc_metadata->dst);
 
        kfree_rcu(params, rcu);
-
-       rcu_read_unlock();
 }
 
 static int tunnel_key_dump_addresses(struct sk_buff *skb,
@@ -245,10 +242,8 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
                .bindcnt  = t->tcf_bindcnt - bind,
        };
        struct tcf_t tm;
-       int ret = -1;
 
-       rcu_read_lock();
-       params = rcu_dereference(t->params);
+       params = rtnl_dereference(t->params);
 
        opt.t_action = params->tcft_action;
        opt.action = params->action;
@@ -272,15 +267,11 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
                          &tm, TCA_TUNNEL_KEY_PAD))
                goto nla_put_failure;
 
-       ret = skb->len;
-       goto out;
+       return skb->len;
 
 nla_put_failure:
        nlmsg_trim(skb, b);
-out:
-       rcu_read_unlock();
-
-       return ret;
+       return -1;
 }
 
 static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
index 4742f41..1d92d4d 100644 (file)
@@ -83,9 +83,6 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
        struct cls_bpf_prog *prog;
        int ret = -1;
 
-       if (unlikely(!skb_mac_header_was_set(skb)))
-               return -1;
-
        /* Needed here for accessing maps. */
        rcu_read_lock();
        list_for_each_entry_rcu(prog, &head->plist, link) {
index b084b2a..a3f4c70 100644 (file)
@@ -33,7 +33,6 @@ struct fl_flow_key {
        struct flow_dissector_key_basic basic;
        struct flow_dissector_key_eth_addrs eth;
        struct flow_dissector_key_vlan vlan;
-       struct flow_dissector_key_addrs ipaddrs;
        union {
                struct flow_dissector_key_ipv4_addrs ipv4;
                struct flow_dissector_key_ipv6_addrs ipv6;
@@ -335,6 +334,10 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
        [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
        [TCA_FLOWER_KEY_ENC_IPV6_DST]   = { .len = sizeof(struct in6_addr) },
        [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
+       [TCA_FLOWER_KEY_TCP_SRC_MASK]   = { .type = NLA_U16 },
+       [TCA_FLOWER_KEY_TCP_DST_MASK]   = { .type = NLA_U16 },
+       [TCA_FLOWER_KEY_UDP_SRC_MASK]   = { .type = NLA_U16 },
+       [TCA_FLOWER_KEY_UDP_DST_MASK]   = { .type = NLA_U16 },
 };
 
 static void fl_set_key_val(struct nlattr **tb,
@@ -432,17 +435,17 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
 
        if (key->basic.ip_proto == IPPROTO_TCP) {
                fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
-                              &mask->tp.src, TCA_FLOWER_UNSPEC,
+                              &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
                               sizeof(key->tp.src));
                fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
-                              &mask->tp.dst, TCA_FLOWER_UNSPEC,
+                              &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
                               sizeof(key->tp.dst));
        } else if (key->basic.ip_proto == IPPROTO_UDP) {
                fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
-                              &mask->tp.src, TCA_FLOWER_UNSPEC,
+                              &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
                               sizeof(key->tp.src));
                fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
-                              &mask->tp.dst, TCA_FLOWER_UNSPEC,
+                              &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
                               sizeof(key->tp.dst));
        }
 
@@ -877,18 +880,18 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
 
        if (key->basic.ip_proto == IPPROTO_TCP &&
            (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
-                            &mask->tp.src, TCA_FLOWER_UNSPEC,
+                            &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
                             sizeof(key->tp.src)) ||
             fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
-                            &mask->tp.dst, TCA_FLOWER_UNSPEC,
+                            &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
                             sizeof(key->tp.dst))))
                goto nla_put_failure;
        else if (key->basic.ip_proto == IPPROTO_UDP &&
                 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
-                                 &mask->tp.src, TCA_FLOWER_UNSPEC,
+                                 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
                                  sizeof(key->tp.src)) ||
                  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
-                                 &mask->tp.dst, TCA_FLOWER_UNSPEC,
+                                 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
                                  sizeof(key->tp.dst))))
                goto nla_put_failure;
 
index e5458b9..dc52cc1 100644 (file)
@@ -823,20 +823,24 @@ nla_put_failure:
 static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 {
        struct fq_sched_data *q = qdisc_priv(sch);
-       u64 now = ktime_get_ns();
-       struct tc_fq_qd_stats st = {
-               .gc_flows               = q->stat_gc_flows,
-               .highprio_packets       = q->stat_internal_packets,
-               .tcp_retrans            = q->stat_tcp_retrans,
-               .throttled              = q->stat_throttled,
-               .flows_plimit           = q->stat_flows_plimit,
-               .pkts_too_long          = q->stat_pkts_too_long,
-               .allocation_errors      = q->stat_allocation_errors,
-               .flows                  = q->flows,
-               .inactive_flows         = q->inactive_flows,
-               .throttled_flows        = q->throttled_flows,
-               .time_next_delayed_flow = q->time_next_delayed_flow - now,
-       };
+       struct tc_fq_qd_stats st;
+
+       sch_tree_lock(sch);
+
+       st.gc_flows               = q->stat_gc_flows;
+       st.highprio_packets       = q->stat_internal_packets;
+       st.tcp_retrans            = q->stat_tcp_retrans;
+       st.throttled              = q->stat_throttled;
+       st.flows_plimit           = q->stat_flows_plimit;
+       st.pkts_too_long          = q->stat_pkts_too_long;
+       st.allocation_errors      = q->stat_allocation_errors;
+       st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
+       st.flows                  = q->flows;
+       st.inactive_flows         = q->inactive_flows;
+       st.throttled_flows        = q->throttled_flows;
+       st.pad                    = 0;
+
+       sch_tree_unlock(sch);
 
        return gnet_stats_copy_app(d, &st, sizeof(st));
 }
index a55e547..af9cc80 100644 (file)
@@ -70,6 +70,19 @@ static struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
        return msg;
 }
 
+void sctp_datamsg_free(struct sctp_datamsg *msg)
+{
+       struct sctp_chunk *chunk;
+
+       /* This doesn't have to be a _safe variant because
+        * sctp_chunk_free() only drops the refs.
+        */
+       list_for_each_entry(chunk, &msg->chunks, frag_list)
+               sctp_chunk_free(chunk);
+
+       sctp_datamsg_put(msg);
+}
+
 /* Final destruction of datamsg memory. */
 static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
 {
index 1f1682b..0c605ec 100644 (file)
@@ -180,7 +180,6 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
                                       int one_packet, gfp_t gfp)
 {
        sctp_xmit_t retval;
-       int error = 0;
 
        pr_debug("%s: packet:%p size:%Zu chunk:%p size:%d\n", __func__,
                 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);
@@ -188,6 +187,8 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
        switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
        case SCTP_XMIT_PMTU_FULL:
                if (!packet->has_cookie_echo) {
+                       int error = 0;
+
                        error = sctp_packet_transmit(packet, gfp);
                        if (error < 0)
                                chunk->skb->sk->sk_err = -error;
@@ -441,14 +442,14 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
                         * time. Application may notice this error.
                         */
                        pr_err_once("Trying to GSO but underlying device doesn't support it.");
-                       goto nomem;
+                       goto err;
                }
        } else {
                pkt_size = packet->size;
        }
        head = alloc_skb(pkt_size + MAX_HEADER, gfp);
        if (!head)
-               goto nomem;
+               goto err;
        if (gso) {
                NAPI_GRO_CB(head)->last = head;
                skb_shinfo(head)->gso_type = sk->sk_gso_type;
@@ -469,8 +470,12 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
                }
        }
        dst = dst_clone(tp->dst);
-       if (!dst)
-               goto no_route;
+       if (!dst) {
+               if (asoc)
+                       IP_INC_STATS(sock_net(asoc->base.sk),
+                                    IPSTATS_MIB_OUTNOROUTES);
+               goto nodst;
+       }
        skb_dst_set(head, dst);
 
        /* Build the SCTP header.  */
@@ -621,8 +626,10 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
                if (!gso)
                        break;
 
-               if (skb_gro_receive(&head, nskb))
+               if (skb_gro_receive(&head, nskb)) {
+                       kfree_skb(nskb);
                        goto nomem;
+               }
                nskb = NULL;
                if (WARN_ON_ONCE(skb_shinfo(head)->gso_segs >=
                                 sk->sk_gso_max_segs))
@@ -716,18 +723,13 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
        }
        head->ignore_df = packet->ipfragok;
        tp->af_specific->sctp_xmit(head, tp);
+       goto out;
 
-out:
-       sctp_packet_reset(packet);
-       return err;
-no_route:
-       kfree_skb(head);
-       if (nskb != head)
-               kfree_skb(nskb);
-
-       if (asoc)
-               IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+nomem:
+       if (packet->auth && list_empty(&packet->auth->list))
+               sctp_chunk_free(packet->auth);
 
+nodst:
        /* FIXME: Returning the 'err' will affect all the associations
         * associated with a socket, although only one of the paths of the
         * association is unreachable.
@@ -736,22 +738,18 @@ no_route:
         * required.
         */
         /* err = -EHOSTUNREACH; */
-err:
-       /* Control chunks are unreliable so just drop them.  DATA chunks
-        * will get resent or dropped later.
-        */
+       kfree_skb(head);
 
+err:
        list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
                list_del_init(&chunk->list);
                if (!sctp_chunk_is_data(chunk))
                        sctp_chunk_free(chunk);
        }
-       goto out;
-nomem:
-       if (packet->auth && list_empty(&packet->auth->list))
-               sctp_chunk_free(packet->auth);
-       err = -ENOMEM;
-       goto err;
+
+out:
+       sctp_packet_reset(packet);
+       return err;
 }
 
 /********************************************************************
@@ -878,7 +876,7 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
                                        struct sctp_chunk *chunk,
                                        u16 chunk_len)
 {
-       size_t psize, pmtu;
+       size_t psize, pmtu, maxsize;
        sctp_xmit_t retval = SCTP_XMIT_OK;
 
        psize = packet->size;
@@ -906,6 +904,17 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
                        goto out;
                }
 
+               /* Similarly, if this chunk was built before a PMTU
+                * reduction, we have to fragment it at IP level now. So
+                * if the packet already contains something, we need to
+                * flush.
+                */
+               maxsize = pmtu - packet->overhead;
+               if (packet->auth)
+                       maxsize -= WORD_ROUND(packet->auth->skb->len);
+               if (chunk_len > maxsize)
+                       retval = SCTP_XMIT_PMTU_FULL;
+
                /* It is also okay to fragment if the chunk we are
                 * adding is a control chunk, but only if current packet
                 * is not a GSO one otherwise it causes fragmentation of
index 72e54a4..8c3f446 100644 (file)
@@ -68,7 +68,7 @@ static void sctp_mark_missing(struct sctp_outq *q,
 
 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
 
-static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
+static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
 
 /* Add data to the front of the queue. */
 static inline void sctp_outq_head_data(struct sctp_outq *q,
@@ -285,10 +285,9 @@ void sctp_outq_free(struct sctp_outq *q)
 }
 
 /* Put a new chunk in an sctp_outq.  */
-int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
+void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
 {
        struct net *net = sock_net(q->asoc->base.sk);
-       int error = 0;
 
        pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
                 chunk && chunk->chunk_hdr ?
@@ -299,54 +298,26 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
         * immediately.
         */
        if (sctp_chunk_is_data(chunk)) {
-               /* Is it OK to queue data chunks?  */
-               /* From 9. Termination of Association
-                *
-                * When either endpoint performs a shutdown, the
-                * association on each peer will stop accepting new
-                * data from its user and only deliver data in queue
-                * at the time of sending or receiving the SHUTDOWN
-                * chunk.
-                */
-               switch (q->asoc->state) {
-               case SCTP_STATE_CLOSED:
-               case SCTP_STATE_SHUTDOWN_PENDING:
-               case SCTP_STATE_SHUTDOWN_SENT:
-               case SCTP_STATE_SHUTDOWN_RECEIVED:
-               case SCTP_STATE_SHUTDOWN_ACK_SENT:
-                       /* Cannot send after transport endpoint shutdown */
-                       error = -ESHUTDOWN;
-                       break;
-
-               default:
-                       pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
-                                __func__, q, chunk, chunk && chunk->chunk_hdr ?
-                                sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
-                                "illegal chunk");
-
-                       sctp_chunk_hold(chunk);
-                       sctp_outq_tail_data(q, chunk);
-                       if (chunk->asoc->prsctp_enable &&
-                           SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
-                               chunk->asoc->sent_cnt_removable++;
-                       if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
-                               SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
-                       else
-                               SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
-                       break;
-               }
+               pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
+                        __func__, q, chunk, chunk && chunk->chunk_hdr ?
+                        sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
+                        "illegal chunk");
+
+               sctp_outq_tail_data(q, chunk);
+               if (chunk->asoc->prsctp_enable &&
+                   SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
+                       chunk->asoc->sent_cnt_removable++;
+               if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+                       SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
+               else
+                       SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
        } else {
                list_add_tail(&chunk->list, &q->control_chunk_list);
                SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
        }
 
-       if (error < 0)
-               return error;
-
        if (!q->cork)
-               error = sctp_outq_flush(q, 0, gfp);
-
-       return error;
+               sctp_outq_flush(q, 0, gfp);
 }
 
 /* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
@@ -559,7 +530,6 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
                     sctp_retransmit_reason_t reason)
 {
        struct net *net = sock_net(q->asoc->base.sk);
-       int error = 0;
 
        switch (reason) {
        case SCTP_RTXR_T3_RTX:
@@ -603,10 +573,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
         * will be flushed at the end.
         */
        if (reason != SCTP_RTXR_FAST_RTX)
-               error = sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
-
-       if (error)
-               q->asoc->base.sk->sk_err = -error;
+               sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
 }
 
 /*
@@ -778,12 +745,12 @@ redo:
 }
 
 /* Cork the outqueue so queued chunks are really queued. */
-int sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
+void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
 {
        if (q->cork)
                q->cork = 0;
 
-       return sctp_outq_flush(q, 0, gfp);
+       sctp_outq_flush(q, 0, gfp);
 }
 
 
@@ -796,7 +763,7 @@ int sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
  * locking concerns must be made.  Today we use the sock lock to protect
  * this function.
  */
-static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
+static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 {
        struct sctp_packet *packet;
        struct sctp_packet singleton;
@@ -919,8 +886,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                        sctp_packet_config(&singleton, vtag, 0);
                        sctp_packet_append_chunk(&singleton, chunk);
                        error = sctp_packet_transmit(&singleton, gfp);
-                       if (error < 0)
-                               return error;
+                       if (error < 0) {
+                               asoc->base.sk->sk_err = -error;
+                               return;
+                       }
                        break;
 
                case SCTP_CID_ABORT:
@@ -1018,6 +987,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                retran:
                        error = sctp_outq_flush_rtx(q, packet,
                                                    rtx_timeout, &start_timer);
+                       if (error < 0)
+                               asoc->base.sk->sk_err = -error;
 
                        if (start_timer) {
                                sctp_transport_reset_t3_rtx(transport);
@@ -1192,14 +1163,15 @@ sctp_flush_out:
                                                      struct sctp_transport,
                                                      send_ready);
                packet = &t->packet;
-               if (!sctp_packet_empty(packet))
+               if (!sctp_packet_empty(packet)) {
                        error = sctp_packet_transmit(packet, gfp);
+                       if (error < 0)
+                               asoc->base.sk->sk_err = -error;
+               }
 
                /* Clear the burst limited state, if any */
                sctp_transport_burst_reset(t);
        }
-
-       return error;
 }
 
 /* Update unack_data based on the incoming SACK chunk */
index 12d4519..c345bf1 100644 (file)
@@ -1020,19 +1020,13 @@ static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
  * This way the whole message is queued up and bundling is
  * encouraged for small fragments.
  */
-static int sctp_cmd_send_msg(struct sctp_association *asoc,
-                               struct sctp_datamsg *msg, gfp_t gfp)
+static void sctp_cmd_send_msg(struct sctp_association *asoc,
+                             struct sctp_datamsg *msg, gfp_t gfp)
 {
        struct sctp_chunk *chunk;
-       int error = 0;
-
-       list_for_each_entry(chunk, &msg->chunks, frag_list) {
-               error = sctp_outq_tail(&asoc->outqueue, chunk, gfp);
-               if (error)
-                       break;
-       }
 
-       return error;
+       list_for_each_entry(chunk, &msg->chunks, frag_list)
+               sctp_outq_tail(&asoc->outqueue, chunk, gfp);
 }
 
 
@@ -1427,8 +1421,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                                local_cork = 1;
                        }
                        /* Send a chunk to our peer.  */
-                       error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk,
-                                              gfp);
+                       sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
                        break;
 
                case SCTP_CMD_SEND_PKT:
@@ -1682,7 +1675,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                case SCTP_CMD_FORCE_PRIM_RETRAN:
                        t = asoc->peer.retran_path;
                        asoc->peer.retran_path = asoc->peer.primary_path;
-                       error = sctp_outq_uncork(&asoc->outqueue, gfp);
+                       sctp_outq_uncork(&asoc->outqueue, gfp);
                        local_cork = 0;
                        asoc->peer.retran_path = t;
                        break;
@@ -1709,7 +1702,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                                sctp_outq_cork(&asoc->outqueue);
                                local_cork = 1;
                        }
-                       error = sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
+                       sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
                        break;
                case SCTP_CMD_SEND_NEXT_ASCONF:
                        sctp_cmd_send_asconf(asoc);
@@ -1739,9 +1732,9 @@ out:
         */
        if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
                if (chunk->end_of_packet || chunk->singleton)
-                       error = sctp_outq_uncork(&asoc->outqueue, gfp);
+                       sctp_outq_uncork(&asoc->outqueue, gfp);
        } else if (local_cork)
-               error = sctp_outq_uncork(&asoc->outqueue, gfp);
+               sctp_outq_uncork(&asoc->outqueue, gfp);
 
        if (sp->data_ready_signalled)
                sp->data_ready_signalled = 0;
index 9fc417a..6cdc61c 100644 (file)
@@ -1958,6 +1958,8 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
 
        /* Now send the (possibly) fragmented message. */
        list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
+               sctp_chunk_hold(chunk);
+
                /* Do accounting for the write space.  */
                sctp_set_owner_w(chunk);
 
@@ -1970,13 +1972,15 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
         * breaks.
         */
        err = sctp_primitive_SEND(net, asoc, datamsg);
-       sctp_datamsg_put(datamsg);
        /* Did the lower layer accept the chunk? */
-       if (err)
+       if (err) {
+               sctp_datamsg_free(datamsg);
                goto out_free;
+       }
 
        pr_debug("%s: we sent primitively\n", __func__);
 
+       sctp_datamsg_put(datamsg);
        err = msg_len;
 
        if (unlikely(wait_connect)) {
index 7f79fb7..66f23b3 100644 (file)
@@ -453,7 +453,7 @@ static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
        struct rpc_xprt_switch *xps;
 
        if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
-               WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+               WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
                xps = args->bc_xprt->xpt_bc_xps;
                xprt_switch_get(xps);
        } else {
@@ -520,7 +520,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
        char servername[48];
 
        if (args->bc_xprt) {
-               WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+               WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
                xprt = args->bc_xprt->xpt_bc_xprt;
                if (xprt) {
                        xprt_get(xprt);
index 6b626a6..a04fe9b 100644 (file)
@@ -62,6 +62,8 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
 
 /**
  * named_prepare_buf - allocate & initialize a publication message
+ *
+ * The buffer returned is of size INT_H_SIZE + payload size
  */
 static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
                                         u32 dest)
@@ -141,9 +143,9 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
        struct publication *publ;
        struct sk_buff *skb = NULL;
        struct distr_item *item = NULL;
-       uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) *
-                       ITEM_SIZE;
-       uint msg_rem = msg_dsz;
+       u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
+                       ITEM_SIZE) * ITEM_SIZE;
+       u32 msg_rem = msg_dsz;
 
        list_for_each_entry(publ, pls, local_list) {
                /* Prepare next buffer: */
index dd27468..d80cd3f 100644 (file)
@@ -665,7 +665,8 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
 
        if (!opts[TIPC_NLA_UDP_LOCAL] || !opts[TIPC_NLA_UDP_REMOTE]) {
                pr_err("Invalid UDP bearer configuration");
-               return -EINVAL;
+               err = -EINVAL;
+               goto err;
        }
 
        err = tipc_parse_udp_addr(opts[TIPC_NLA_UDP_LOCAL], &local,
index f1dffe8..8309687 100644 (file)
@@ -661,11 +661,11 @@ static int unix_set_peek_off(struct sock *sk, int val)
 {
        struct unix_sock *u = unix_sk(sk);
 
-       if (mutex_lock_interruptible(&u->readlock))
+       if (mutex_lock_interruptible(&u->iolock))
                return -EINTR;
 
        sk->sk_peek_off = val;
-       mutex_unlock(&u->readlock);
+       mutex_unlock(&u->iolock);
 
        return 0;
 }
@@ -779,7 +779,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
        spin_lock_init(&u->lock);
        atomic_long_set(&u->inflight, 0);
        INIT_LIST_HEAD(&u->link);
-       mutex_init(&u->readlock); /* single task reading lock */
+       mutex_init(&u->iolock); /* single task reading lock */
+       mutex_init(&u->bindlock); /* single task binding lock */
        init_waitqueue_head(&u->peer_wait);
        init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
        unix_insert_socket(unix_sockets_unbound(sk), sk);
@@ -848,7 +849,7 @@ static int unix_autobind(struct socket *sock)
        int err;
        unsigned int retries = 0;
 
-       err = mutex_lock_interruptible(&u->readlock);
+       err = mutex_lock_interruptible(&u->bindlock);
        if (err)
                return err;
 
@@ -895,7 +896,7 @@ retry:
        spin_unlock(&unix_table_lock);
        err = 0;
 
-out:   mutex_unlock(&u->readlock);
+out:   mutex_unlock(&u->bindlock);
        return err;
 }
 
@@ -954,20 +955,32 @@ fail:
        return NULL;
 }
 
-static int unix_mknod(struct dentry *dentry, const struct path *path, umode_t mode,
-                     struct path *res)
+static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
 {
-       int err;
+       struct dentry *dentry;
+       struct path path;
+       int err = 0;
+       /*
+        * Get the parent directory, calculate the hash for last
+        * component.
+        */
+       dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
+       err = PTR_ERR(dentry);
+       if (IS_ERR(dentry))
+               return err;
 
-       err = security_path_mknod(path, dentry, mode, 0);
+       /*
+        * All right, let's create it.
+        */
+       err = security_path_mknod(&path, dentry, mode, 0);
        if (!err) {
-               err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
+               err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
                if (!err) {
-                       res->mnt = mntget(path->mnt);
+                       res->mnt = mntget(path.mnt);
                        res->dentry = dget(dentry);
                }
        }
-
+       done_path_create(&path, dentry);
        return err;
 }
 
@@ -978,12 +991,10 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        struct unix_sock *u = unix_sk(sk);
        struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
        char *sun_path = sunaddr->sun_path;
-       int err, name_err;
+       int err;
        unsigned int hash;
        struct unix_address *addr;
        struct hlist_head *list;
-       struct path path;
-       struct dentry *dentry;
 
        err = -EINVAL;
        if (sunaddr->sun_family != AF_UNIX)
@@ -999,34 +1010,14 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                goto out;
        addr_len = err;
 
-       name_err = 0;
-       dentry = NULL;
-       if (sun_path[0]) {
-               /* Get the parent directory, calculate the hash for last
-                * component.
-                */
-               dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
-
-               if (IS_ERR(dentry)) {
-                       /* delay report until after 'already bound' check */
-                       name_err = PTR_ERR(dentry);
-                       dentry = NULL;
-               }
-       }
-
-       err = mutex_lock_interruptible(&u->readlock);
+       err = mutex_lock_interruptible(&u->bindlock);
        if (err)
-               goto out_path;
+               goto out;
 
        err = -EINVAL;
        if (u->addr)
                goto out_up;
 
-       if (name_err) {
-               err = name_err == -EEXIST ? -EADDRINUSE : name_err;
-               goto out_up;
-       }
-
        err = -ENOMEM;
        addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
        if (!addr)
@@ -1037,11 +1028,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        addr->hash = hash ^ sk->sk_type;
        atomic_set(&addr->refcnt, 1);
 
-       if (dentry) {
-               struct path u_path;
+       if (sun_path[0]) {
+               struct path path;
                umode_t mode = S_IFSOCK |
                       (SOCK_INODE(sock)->i_mode & ~current_umask());
-               err = unix_mknod(dentry, &path, mode, &u_path);
+               err = unix_mknod(sun_path, mode, &path);
                if (err) {
                        if (err == -EEXIST)
                                err = -EADDRINUSE;
@@ -1049,9 +1040,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                        goto out_up;
                }
                addr->hash = UNIX_HASH_SIZE;
-               hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+               hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
                spin_lock(&unix_table_lock);
-               u->path = u_path;
+               u->path = path;
                list = &unix_socket_table[hash];
        } else {
                spin_lock(&unix_table_lock);
@@ -1073,11 +1064,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 out_unlock:
        spin_unlock(&unix_table_lock);
 out_up:
-       mutex_unlock(&u->readlock);
-out_path:
-       if (dentry)
-               done_path_create(&path, dentry);
-
+       mutex_unlock(&u->bindlock);
 out:
        return err;
 }
@@ -1969,17 +1956,17 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
        if (false) {
 alloc_skb:
                unix_state_unlock(other);
-               mutex_unlock(&unix_sk(other)->readlock);
+               mutex_unlock(&unix_sk(other)->iolock);
                newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
                                              &err, 0);
                if (!newskb)
                        goto err;
        }
 
-       /* we must acquire readlock as we modify already present
+       /* we must acquire iolock as we modify already present
         * skbs in the sk_receive_queue and mess with skb->len
         */
-       err = mutex_lock_interruptible(&unix_sk(other)->readlock);
+       err = mutex_lock_interruptible(&unix_sk(other)->iolock);
        if (err) {
                err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
                goto err;
@@ -2046,7 +2033,7 @@ alloc_skb:
        }
 
        unix_state_unlock(other);
-       mutex_unlock(&unix_sk(other)->readlock);
+       mutex_unlock(&unix_sk(other)->iolock);
 
        other->sk_data_ready(other);
        scm_destroy(&scm);
@@ -2055,7 +2042,7 @@ alloc_skb:
 err_state_unlock:
        unix_state_unlock(other);
 err_unlock:
-       mutex_unlock(&unix_sk(other)->readlock);
+       mutex_unlock(&unix_sk(other)->iolock);
 err:
        kfree_skb(newskb);
        if (send_sigpipe && !(flags & MSG_NOSIGNAL))
@@ -2123,7 +2110,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 
        do {
-               mutex_lock(&u->readlock);
+               mutex_lock(&u->iolock);
 
                skip = sk_peek_offset(sk, flags);
                skb = __skb_try_recv_datagram(sk, flags, &peeked, &skip, &err,
@@ -2131,14 +2118,14 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
                if (skb)
                        break;
 
-               mutex_unlock(&u->readlock);
+               mutex_unlock(&u->iolock);
 
                if (err != -EAGAIN)
                        break;
        } while (timeo &&
                 !__skb_wait_for_more_packets(sk, &err, &timeo, last));
 
-       if (!skb) { /* implies readlock unlocked */
+       if (!skb) { /* implies iolock unlocked */
                unix_state_lock(sk);
                /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
                if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
@@ -2203,7 +2190,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 
 out_free:
        skb_free_datagram(sk, skb);
-       mutex_unlock(&u->readlock);
+       mutex_unlock(&u->iolock);
 out:
        return err;
 }
@@ -2298,7 +2285,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
        /* Lock the socket to prevent queue disordering
         * while sleeps in memcpy_tomsg
         */
-       mutex_lock(&u->readlock);
+       mutex_lock(&u->iolock);
 
        if (flags & MSG_PEEK)
                skip = sk_peek_offset(sk, flags);
@@ -2340,7 +2327,7 @@ again:
                                break;
                        }
 
-                       mutex_unlock(&u->readlock);
+                       mutex_unlock(&u->iolock);
 
                        timeo = unix_stream_data_wait(sk, timeo, last,
                                                      last_len);
@@ -2351,7 +2338,7 @@ again:
                                goto out;
                        }
 
-                       mutex_lock(&u->readlock);
+                       mutex_lock(&u->iolock);
                        goto redo;
 unlock:
                        unix_state_unlock(sk);
@@ -2454,7 +2441,7 @@ unlock:
                }
        } while (size);
 
-       mutex_unlock(&u->readlock);
+       mutex_unlock(&u->iolock);
        if (state->msg)
                scm_recv(sock, state->msg, &scm, flags);
        else
@@ -2495,9 +2482,9 @@ static ssize_t skb_unix_socket_splice(struct sock *sk,
        int ret;
        struct unix_sock *u = unix_sk(sk);
 
-       mutex_unlock(&u->readlock);
+       mutex_unlock(&u->iolock);
        ret = splice_to_pipe(pipe, spd);
-       mutex_lock(&u->readlock);
+       mutex_lock(&u->iolock);
 
        return ret;
 }
index dbb2738..6250b1c 100644 (file)
@@ -958,29 +958,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
                        return private(dev, iwr, cmd, info, handler);
        }
        /* Old driver API : call driver ioctl handler */
-       if (dev->netdev_ops->ndo_do_ioctl) {
-#ifdef CONFIG_COMPAT
-               if (info->flags & IW_REQUEST_FLAG_COMPAT) {
-                       int ret = 0;
-                       struct iwreq iwr_lcl;
-                       struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
-
-                       memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
-                       iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
-                       iwr_lcl.u.data.length = iwp_compat->length;
-                       iwr_lcl.u.data.flags = iwp_compat->flags;
-
-                       ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
-
-                       iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
-                       iwp_compat->length = iwr_lcl.u.data.length;
-                       iwp_compat->flags = iwr_lcl.u.data.flags;
-
-                       return ret;
-               } else
-#endif
-                       return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
-       }
+       if (dev->netdev_ops->ndo_do_ioctl)
+               return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
        return -EOPNOTSUPP;
 }
 
index 1c4ad47..6e3f025 100644 (file)
@@ -207,15 +207,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
        family = XFRM_SPI_SKB_CB(skb)->family;
 
        /* if tunnel is present override skb->mark value with tunnel i_key */
-       if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) {
-               switch (family) {
-               case AF_INET:
+       switch (family) {
+       case AF_INET:
+               if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
                        mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
-                       break;
-               case AF_INET6:
+               break;
+       case AF_INET6:
+               if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
                        mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
-                       break;
-               }
+               break;
        }
 
        /* Allocate new secpath or COW existing one. */
index f7ce626..fd69866 100644 (file)
@@ -643,6 +643,10 @@ static void xfrm_hash_rebuild(struct work_struct *work)
 
        /* re-insert all policies by order of creation */
        list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
+               if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
+                       /* skip socket policies */
+                       continue;
+               }
                newpos = NULL;
                chain = policy_hash_bysel(net, &policy->selector,
                                          policy->family,
index d516845..cb65d91 100644 (file)
@@ -896,7 +896,8 @@ static int xfrm_dump_sa_done(struct netlink_callback *cb)
        struct sock *sk = cb->skb->sk;
        struct net *net = sock_net(sk);
 
-       xfrm_state_walk_done(walk, net);
+       if (cb->args[0])
+               xfrm_state_walk_done(walk, net);
        return 0;
 }
 
@@ -921,8 +922,6 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
                u8 proto = 0;
                int err;
 
-               cb->args[0] = 1;
-
                err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX,
                                  xfrma_policy);
                if (err < 0)
@@ -939,6 +938,7 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
                        proto = nla_get_u8(attrs[XFRMA_PROTO]);
 
                xfrm_state_walk_init(walk, proto, filter);
+               cb->args[0] = 1;
        }
 
        (void) xfrm_state_walk(net, walk, dump_one_state, &info);
@@ -2051,9 +2051,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (up->hard) {
                xfrm_policy_delete(xp, p->dir);
                xfrm_audit_policy_delete(xp, 1, true);
-       } else {
-               // reset the timers here?
-               WARN(1, "Don't know what to do with soft policy expire\n");
        }
        km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
 
@@ -2117,7 +2114,7 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        err = verify_newpolicy_info(&ua->policy);
        if (err)
-               goto bad_policy;
+               goto free_state;
 
        /*   build an XP */
        xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
@@ -2149,8 +2146,6 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        return 0;
 
-bad_policy:
-       WARN(1, "BAD policy passed\n");
 free_state:
        kfree(x);
 nomem:
index 7a15289..3303bb8 100644 (file)
@@ -1,4 +1,5 @@
 /* Copyright (c) 2016 VMware
+ * Copyright (c) 2016 Facebook
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -8,12 +9,15 @@
 #include <uapi/linux/if_ether.h>
 #include <uapi/linux/if_packet.h>
 #include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
 #include <uapi/linux/in.h>
 #include <uapi/linux/tcp.h>
 #include <uapi/linux/filter.h>
 #include <uapi/linux/pkt_cls.h>
+#include <net/ipv6.h>
 #include "bpf_helpers.h"
 
+#define _htonl __builtin_bswap32
 #define ERROR(ret) do {\
                char fmt[] = "ERROR line:%d ret:%d\n";\
                bpf_trace_printk(fmt, sizeof(fmt), __LINE__, ret); \
@@ -188,4 +192,190 @@ int _geneve_get_tunnel(struct __sk_buff *skb)
        return TC_ACT_OK;
 }
 
+SEC("ipip_set_tunnel")
+int _ipip_set_tunnel(struct __sk_buff *skb)
+{
+       struct bpf_tunnel_key key = {};
+       void *data = (void *)(long)skb->data;
+       struct iphdr *iph = data;
+       struct tcphdr *tcp = data + sizeof(*iph);
+       void *data_end = (void *)(long)skb->data_end;
+       int ret;
+
+       /* single length check */
+       if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
+               ERROR(1);
+               return TC_ACT_SHOT;
+       }
+
+       key.tunnel_ttl = 64;
+       if (iph->protocol == IPPROTO_ICMP) {
+               key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+       } else {
+               if (iph->protocol != IPPROTO_TCP || iph->ihl != 5)
+                       return TC_ACT_SHOT;
+
+               if (tcp->dest == htons(5200))
+                       key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+               else if (tcp->dest == htons(5201))
+                       key.remote_ipv4 = 0xac100165; /* 172.16.1.101 */
+               else
+                       return TC_ACT_SHOT;
+       }
+
+       ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
+       if (ret < 0) {
+               ERROR(ret);
+               return TC_ACT_SHOT;
+       }
+
+       return TC_ACT_OK;
+}
+
+SEC("ipip_get_tunnel")
+int _ipip_get_tunnel(struct __sk_buff *skb)
+{
+       int ret;
+       struct bpf_tunnel_key key;
+       char fmt[] = "remote ip 0x%x\n";
+
+       ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
+       if (ret < 0) {
+               ERROR(ret);
+               return TC_ACT_SHOT;
+       }
+
+       bpf_trace_printk(fmt, sizeof(fmt), key.remote_ipv4);
+       return TC_ACT_OK;
+}
+
+SEC("ipip6_set_tunnel")
+int _ipip6_set_tunnel(struct __sk_buff *skb)
+{
+       struct bpf_tunnel_key key = {};
+       void *data = (void *)(long)skb->data;
+       struct iphdr *iph = data;
+       struct tcphdr *tcp = data + sizeof(*iph);
+       void *data_end = (void *)(long)skb->data_end;
+       int ret;
+
+       /* single length check */
+       if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
+               ERROR(1);
+               return TC_ACT_SHOT;
+       }
+
+       key.remote_ipv6[0] = _htonl(0x2401db00);
+       key.tunnel_ttl = 64;
+
+       if (iph->protocol == IPPROTO_ICMP) {
+               key.remote_ipv6[3] = _htonl(1);
+       } else {
+               if (iph->protocol != IPPROTO_TCP || iph->ihl != 5) {
+                       ERROR(iph->protocol);
+                       return TC_ACT_SHOT;
+               }
+
+               if (tcp->dest == htons(5200)) {
+                       key.remote_ipv6[3] = _htonl(1);
+               } else if (tcp->dest == htons(5201)) {
+                       key.remote_ipv6[3] = _htonl(2);
+               } else {
+                       ERROR(tcp->dest);
+                       return TC_ACT_SHOT;
+               }
+       }
+
+       ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+       if (ret < 0) {
+               ERROR(ret);
+               return TC_ACT_SHOT;
+       }
+
+       return TC_ACT_OK;
+}
+
+SEC("ipip6_get_tunnel")
+int _ipip6_get_tunnel(struct __sk_buff *skb)
+{
+       int ret;
+       struct bpf_tunnel_key key;
+       char fmt[] = "remote ip6 %x::%x\n";
+
+       ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+       if (ret < 0) {
+               ERROR(ret);
+               return TC_ACT_SHOT;
+       }
+
+       bpf_trace_printk(fmt, sizeof(fmt), _htonl(key.remote_ipv6[0]),
+                        _htonl(key.remote_ipv6[3]));
+       return TC_ACT_OK;
+}
+
+SEC("ip6ip6_set_tunnel")
+int _ip6ip6_set_tunnel(struct __sk_buff *skb)
+{
+       struct bpf_tunnel_key key = {};
+       void *data = (void *)(long)skb->data;
+       struct ipv6hdr *iph = data;
+       struct tcphdr *tcp = data + sizeof(*iph);
+       void *data_end = (void *)(long)skb->data_end;
+       int ret;
+
+       /* single length check */
+       if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
+               ERROR(1);
+               return TC_ACT_SHOT;
+       }
+
+       key.remote_ipv6[0] = _htonl(0x2401db00);
+       key.tunnel_ttl = 64;
+
+       if (iph->nexthdr == NEXTHDR_ICMP) {
+               key.remote_ipv6[3] = _htonl(1);
+       } else {
+               if (iph->nexthdr != NEXTHDR_TCP) {
+                       ERROR(iph->nexthdr);
+                       return TC_ACT_SHOT;
+               }
+
+               if (tcp->dest == htons(5200)) {
+                       key.remote_ipv6[3] = _htonl(1);
+               } else if (tcp->dest == htons(5201)) {
+                       key.remote_ipv6[3] = _htonl(2);
+               } else {
+                       ERROR(tcp->dest);
+                       return TC_ACT_SHOT;
+               }
+       }
+
+       ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+       if (ret < 0) {
+               ERROR(ret);
+               return TC_ACT_SHOT;
+       }
+
+       return TC_ACT_OK;
+}
+
+SEC("ip6ip6_get_tunnel")
+int _ip6ip6_get_tunnel(struct __sk_buff *skb)
+{
+       int ret;
+       struct bpf_tunnel_key key;
+       char fmt[] = "remote ip6 %x::%x\n";
+
+       ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+       if (ret < 0) {
+               ERROR(ret);
+               return TC_ACT_SHOT;
+       }
+
+       bpf_trace_printk(fmt, sizeof(fmt), _htonl(key.remote_ipv6[0]),
+                        _htonl(key.remote_ipv6[3]));
+       return TC_ACT_OK;
+}
+
+
 char _license[] SEC("license") = "GPL";
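The ipip, ipip6 and ip6ip6 sections above are meant to be attached with cls_bpf in direct-action mode on a collect-metadata ("external") tunnel device. The test scripts that follow automate this; their attach_bpf helpers reduce to:

	tc qdisc add dev $DEV clsact
	tc filter add dev $DEV egress bpf da obj tcbpf2_kern.o sec ipip_set_tunnel
	tc filter add dev $DEV ingress bpf da obj tcbpf2_kern.o sec ipip_get_tunnel

with $DEV created beforehand via "ip link add dev $DEV type ipip external" (or the ip6tnl equivalents for the IPv6 variants).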
diff --git a/samples/bpf/test_ipip.sh b/samples/bpf/test_ipip.sh
new file mode 100755 (executable)
index 0000000..1969254
--- /dev/null
@@ -0,0 +1,178 @@
+#!/bin/bash
+
+function config_device {
+       ip netns add at_ns0
+       ip netns add at_ns1
+       ip netns add at_ns2
+       ip link add veth0 type veth peer name veth0b
+       ip link add veth1 type veth peer name veth1b
+       ip link add veth2 type veth peer name veth2b
+       ip link set veth0b up
+       ip link set veth1b up
+       ip link set veth2b up
+       ip link set dev veth0b mtu 1500
+       ip link set dev veth1b mtu 1500
+       ip link set dev veth2b mtu 1500
+       ip link set veth0 netns at_ns0
+       ip link set veth1 netns at_ns1
+       ip link set veth2 netns at_ns2
+       ip netns exec at_ns0 ip addr add 172.16.1.100/24 dev veth0
+       ip netns exec at_ns0 ip addr add 2401:db00::1/64 dev veth0 nodad
+       ip netns exec at_ns0 ip link set dev veth0 up
+       ip netns exec at_ns1 ip addr add 172.16.1.101/24 dev veth1
+       ip netns exec at_ns1 ip addr add 2401:db00::2/64 dev veth1 nodad
+       ip netns exec at_ns1 ip link set dev veth1 up
+       ip netns exec at_ns2 ip addr add 172.16.1.200/24 dev veth2
+       ip netns exec at_ns2 ip addr add 2401:db00::3/64 dev veth2 nodad
+       ip netns exec at_ns2 ip link set dev veth2 up
+       ip link add br0 type bridge
+       ip link set br0 up
+       ip link set dev br0 mtu 1500
+       ip link set veth0b master br0
+       ip link set veth1b master br0
+       ip link set veth2b master br0
+}
+
+function add_ipip_tunnel {
+       ip netns exec at_ns0 \
+               ip link add dev $DEV_NS type ipip local 172.16.1.100 remote 172.16.1.200
+       ip netns exec at_ns0 ip link set dev $DEV_NS up
+       ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+       ip netns exec at_ns1 \
+               ip link add dev $DEV_NS type ipip local 172.16.1.101 remote 172.16.1.200
+       ip netns exec at_ns1 ip link set dev $DEV_NS up
+       # same inner IP address in at_ns0 and at_ns1
+       ip netns exec at_ns1 ip addr add dev $DEV_NS 10.1.1.100/24
+
+       ip netns exec at_ns2 ip link add dev $DEV type ipip external
+       ip netns exec at_ns2 ip link set dev $DEV up
+       ip netns exec at_ns2 ip addr add dev $DEV 10.1.1.200/24
+}
+
+function add_ipip6_tunnel {
+       ip netns exec at_ns0 \
+               ip link add dev $DEV_NS type ip6tnl mode ipip6 local 2401:db00::1/64 remote 2401:db00::3/64
+       ip netns exec at_ns0 ip link set dev $DEV_NS up
+       ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+       ip netns exec at_ns1 \
+               ip link add dev $DEV_NS type ip6tnl mode ipip6 local 2401:db00::2/64 remote 2401:db00::3/64
+       ip netns exec at_ns1 ip link set dev $DEV_NS up
+       # same inner IP address in at_ns0 and at_ns1
+       ip netns exec at_ns1 ip addr add dev $DEV_NS 10.1.1.100/24
+
+       ip netns exec at_ns2 ip link add dev $DEV type ip6tnl mode ipip6 external
+       ip netns exec at_ns2 ip link set dev $DEV up
+       ip netns exec at_ns2 ip addr add dev $DEV 10.1.1.200/24
+}
+
+function add_ip6ip6_tunnel {
+       ip netns exec at_ns0 \
+               ip link add dev $DEV_NS type ip6tnl mode ip6ip6 local 2401:db00::1/64 remote 2401:db00::3/64
+       ip netns exec at_ns0 ip link set dev $DEV_NS up
+       ip netns exec at_ns0 ip addr add dev $DEV_NS 2601:646::1/64
+       ip netns exec at_ns1 \
+               ip link add dev $DEV_NS type ip6tnl mode ip6ip6 local 2401:db00::2/64 remote 2401:db00::3/64
+       ip netns exec at_ns1 ip link set dev $DEV_NS up
+       # same inner IP address in at_ns0 and at_ns1
+       ip netns exec at_ns1 ip addr add dev $DEV_NS 2601:646::1/64
+
+       ip netns exec at_ns2 ip link add dev $DEV type ip6tnl mode ip6ip6 external
+       ip netns exec at_ns2 ip link set dev $DEV up
+       ip netns exec at_ns2 ip addr add dev $DEV 2601:646::2/64
+}
+
+function attach_bpf {
+       DEV=$1
+       SET_TUNNEL=$2
+       GET_TUNNEL=$3
+       ip netns exec at_ns2 tc qdisc add dev $DEV clsact
+       ip netns exec at_ns2 tc filter add dev $DEV egress bpf da obj tcbpf2_kern.o sec $SET_TUNNEL
+       ip netns exec at_ns2 tc filter add dev $DEV ingress bpf da obj tcbpf2_kern.o sec $GET_TUNNEL
+}
+
+function test_ipip {
+       DEV_NS=ipip_std
+       DEV=ipip_bpf
+       config_device
+#      tcpdump -nei br0 &
+       cat /sys/kernel/debug/tracing/trace_pipe &
+
+       add_ipip_tunnel
+       attach_bpf $DEV ipip_set_tunnel ipip_get_tunnel
+
+       ip netns exec at_ns0 ping -c 1 10.1.1.200
+       ip netns exec at_ns2 ping -c 1 10.1.1.100
+       ip netns exec at_ns0 iperf -sD -p 5200 > /dev/null
+       ip netns exec at_ns1 iperf -sD -p 5201 > /dev/null
+       sleep 0.2
+       # tcp check _same_ IP over different tunnels
+       ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5200
+       ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5201
+       cleanup
+}
+
+# IPv4 over IPv6 tunnel
+function test_ipip6 {
+       DEV_NS=ipip_std
+       DEV=ipip_bpf
+       config_device
+#      tcpdump -nei br0 &
+       cat /sys/kernel/debug/tracing/trace_pipe &
+
+       add_ipip6_tunnel
+       attach_bpf $DEV ipip6_set_tunnel ipip6_get_tunnel
+
+       ip netns exec at_ns0 ping -c 1 10.1.1.200
+       ip netns exec at_ns2 ping -c 1 10.1.1.100
+       ip netns exec at_ns0 iperf -sD -p 5200 > /dev/null
+       ip netns exec at_ns1 iperf -sD -p 5201 > /dev/null
+       sleep 0.2
+       # tcp check _same_ IP over different tunnels
+       ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5200
+       ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5201
+       cleanup
+}
+
+# IPv6 over IPv6 tunnel
+function test_ip6ip6 {
+       DEV_NS=ipip_std
+       DEV=ipip_bpf
+       config_device
+#      tcpdump -nei br0 &
+       cat /sys/kernel/debug/tracing/trace_pipe &
+
+       add_ip6ip6_tunnel
+       attach_bpf $DEV ip6ip6_set_tunnel ip6ip6_get_tunnel
+
+       ip netns exec at_ns0 ping -6 -c 1 2601:646::2
+       ip netns exec at_ns2 ping -6 -c 1 2601:646::1
+       ip netns exec at_ns0 iperf -6sD -p 5200 > /dev/null
+       ip netns exec at_ns1 iperf -6sD -p 5201 > /dev/null
+       sleep 0.2
+       # tcp check _same_ IP over different tunnels
+       ip netns exec at_ns2 iperf -6c 2601:646::1 -n 5k -p 5200
+       ip netns exec at_ns2 iperf -6c 2601:646::1 -n 5k -p 5201
+       cleanup
+}
+
+function cleanup {
+       set +ex
+       pkill iperf
+       ip netns delete at_ns0
+       ip netns delete at_ns1
+       ip netns delete at_ns2
+       ip link del veth0
+       ip link del veth1
+       ip link del veth2
+       ip link del br0
+       pkill tcpdump
+       pkill cat
+       set -ex
+}
+
+cleanup
+echo "Testing IP tunnels..."
+test_ipip
+test_ipip6
+test_ip6ip6
+echo "*** PASS ***"
index 4956589..1ff634f 100755 (executable)
@@ -9,15 +9,13 @@
 # local 172.16.1.200 remote 172.16.1.100
 # veth1 IP: 172.16.1.200, tunnel dev <type>11
 
-set -e
-
 function config_device {
        ip netns add at_ns0
        ip link add veth0 type veth peer name veth1
        ip link set veth0 netns at_ns0
        ip netns exec at_ns0 ip addr add 172.16.1.100/24 dev veth0
        ip netns exec at_ns0 ip link set dev veth0 up
-       ip link set dev veth1 up
+       ip link set dev veth1 up mtu 1500
        ip addr add dev veth1 172.16.1.200/24
 }
 
@@ -67,6 +65,19 @@ function add_geneve_tunnel {
        ip addr add dev $DEV 10.1.1.200/24
 }
 
+function add_ipip_tunnel {
+       # in namespace
+       ip netns exec at_ns0 \
+               ip link add dev $DEV_NS type $TYPE local 172.16.1.100 remote 172.16.1.200
+       ip netns exec at_ns0 ip link set dev $DEV_NS up
+       ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+
+       # out of namespace
+       ip link add dev $DEV type $TYPE external
+       ip link set dev $DEV up
+       ip addr add dev $DEV 10.1.1.200/24
+}
+
 function attach_bpf {
        DEV=$1
        SET_TUNNEL=$2
@@ -85,6 +96,7 @@ function test_gre {
        attach_bpf $DEV gre_set_tunnel gre_get_tunnel
        ping -c 1 10.1.1.100
        ip netns exec at_ns0 ping -c 1 10.1.1.200
+       cleanup
 }
 
 function test_vxlan {
@@ -96,6 +108,7 @@ function test_vxlan {
        attach_bpf $DEV vxlan_set_tunnel vxlan_get_tunnel
        ping -c 1 10.1.1.100
        ip netns exec at_ns0 ping -c 1 10.1.1.200
+       cleanup
 }
 
 function test_geneve {
@@ -107,21 +120,48 @@ function test_geneve {
        attach_bpf $DEV geneve_set_tunnel geneve_get_tunnel
        ping -c 1 10.1.1.100
        ip netns exec at_ns0 ping -c 1 10.1.1.200
+       cleanup
+}
+
+function test_ipip {
+       TYPE=ipip
+       DEV_NS=ipip00
+       DEV=ipip11
+       config_device
+       tcpdump -nei veth1 &
+       cat /sys/kernel/debug/tracing/trace_pipe &
+       add_ipip_tunnel
+       ethtool -K veth1 gso off gro off rx off tx off
+       ip link set dev veth1 mtu 1500
+       attach_bpf $DEV ipip_set_tunnel ipip_get_tunnel
+       ping -c 1 10.1.1.100
+       ip netns exec at_ns0 ping -c 1 10.1.1.200
+       ip netns exec at_ns0 iperf -sD -p 5200 > /dev/null
+       sleep 0.2
+       iperf -c 10.1.1.100 -n 5k -p 5200
+       cleanup
 }
 
 function cleanup {
+       set +ex
+       pkill iperf
        ip netns delete at_ns0
        ip link del veth1
-       ip link del $DEV
+       ip link del ipip11
+       ip link del gretap11
+       ip link del geneve11
+       pkill tcpdump
+       pkill cat
+       set -ex
 }
 
+cleanup
 echo "Testing GRE tunnel..."
 test_gre
-cleanup
 echo "Testing VXLAN tunnel..."
 test_vxlan
-cleanup
 echo "Testing GENEVE tunnel..."
 test_geneve
-cleanup
-echo "Success"
+echo "Testing IPIP tunnel..."
+test_ipip
+echo "*** PASS ***"
index 4de3cc4..206a6b3 100755 (executable)
@@ -3570,15 +3570,6 @@ sub process {
                        }
                }
 
-# check for uses of DEFINE_PCI_DEVICE_TABLE
-               if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) {
-                       if (WARN("DEFINE_PCI_DEVICE_TABLE",
-                                "Prefer struct pci_device_id over deprecated DEFINE_PCI_DEVICE_TABLE\n" . $herecurr) &&
-                           $fix) {
-                               $fixed[$fixlinenr] =~ s/\b(?:static\s+|)DEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=\s*/static const struct pci_device_id $1\[\] = /;
-                       }
-               }
-
 # check for new typedefs, only function parameters and sparse annotations
 # make sense.
                if ($line =~ /\btypedef\s/ &&
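
The hunk above (together with the matching scripts/tags.sh hunk further down) drops the special handling for DEFINE_PCI_DEVICE_TABLE(); the macro was already flagged as deprecated, and drivers simply declare the ID table in the open-coded form the old --fix rule used to produce. For reference, that form looks like this (the vendor/device IDs below are placeholders, not a real device):

        /* Open-coded PCI ID table; 0x1234/0x5678 are placeholder IDs. */
        static const struct pci_device_id example_pci_tbl[] = {
                { PCI_DEVICE(0x1234, 0x5678) },
                { }     /* terminating entry */
        };
        MODULE_DEVICE_TABLE(pci, example_pci_tbl);
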
index e1c09e2..8ea9fd2 100755 (executable)
@@ -332,7 +332,9 @@ if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then
        (cd $objtree; find tools/objtool -type f -executable) >> "$objtree/debian/hdrobjfiles"
 fi
 (cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f) >> "$objtree/debian/hdrobjfiles"
-(cd $objtree; find scripts/gcc-plugins -name \*.so -o -name gcc-common.h) >> "$objtree/debian/hdrobjfiles"
+if grep -q '^CONFIG_GCC_PLUGINS=y' $KCONFIG_CONFIG ; then
+       (cd $objtree; find scripts/gcc-plugins -name \*.so -o -name gcc-common.h) >> "$objtree/debian/hdrobjfiles"
+fi
 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
 mkdir -p "$destdir"
 (cd $srctree; tar -c -f - -T -) < "$objtree/debian/hdrsrcfiles" | (cd $destdir; tar -xf -)
index ed7eef2..b3775a9 100755 (executable)
@@ -206,7 +206,6 @@ regex_c=(
        '/\<DEFINE_PER_CPU_SHARED_ALIGNED([^,]*, *\([[:alnum:]_]*\)/\1/v/'
        '/\<DECLARE_WAIT_QUEUE_HEAD(\([[:alnum:]_]*\)/\1/v/'
        '/\<DECLARE_\(TASKLET\|WORK\|DELAYED_WORK\)(\([[:alnum:]_]*\)/\2/v/'
-       '/\<DEFINE_PCI_DEVICE_TABLE(\([[:alnum:]_]*\)/\1/v/'
        '/\(^\s\)OFFSET(\([[:alnum:]_]*\)/\2/v/'
        '/\(^\s\)DEFINE(\([[:alnum:]_]*\)/\2/v/'
        '/\<DEFINE_HASHTABLE(\([[:alnum:]_]*\)/\1/v/'
index da10d9b..118f454 100644 (file)
@@ -147,6 +147,17 @@ config HARDENED_USERCOPY
          or are part of the kernel text. This kills entire classes
          of heap overflow exploits and similar kernel memory exposures.
 
+config HARDENED_USERCOPY_PAGESPAN
+       bool "Refuse to copy allocations that span multiple pages"
+       depends on HARDENED_USERCOPY
+       depends on EXPERT
+       help
+         When a multi-page allocation is done without __GFP_COMP,
+         hardened usercopy will reject attempts to copy it. There are,
+         however, several cases of this in the kernel that have not all
+         been removed. This config is intended to be used only while
+         trying to find such users.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
index 795437b..b450a27 100644 (file)
@@ -1633,11 +1633,13 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
                return -EBUSY;
        }
        list_add_tail(&rmidi->list, &snd_rawmidi_devices);
+       mutex_unlock(&register_mutex);
        err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI,
                                  rmidi->card, rmidi->device,
                                  &snd_rawmidi_f_ops, rmidi, &rmidi->dev);
        if (err < 0) {
                rmidi_err(rmidi, "unable to register\n");
+               mutex_lock(&register_mutex);
                list_del(&rmidi->list);
                mutex_unlock(&register_mutex);
                return err;
@@ -1645,6 +1647,7 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
        if (rmidi->ops && rmidi->ops->dev_register &&
            (err = rmidi->ops->dev_register(rmidi)) < 0) {
                snd_unregister_device(&rmidi->dev);
+               mutex_lock(&register_mutex);
                list_del(&rmidi->list);
                mutex_unlock(&register_mutex);
                return err;
@@ -1677,7 +1680,6 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
                }
        }
 #endif /* CONFIG_SND_OSSEMUL */
-       mutex_unlock(&register_mutex);
        sprintf(name, "midi%d", rmidi->device);
        entry = snd_info_create_card_entry(rmidi->card, name, rmidi->card->proc_root);
        if (entry) {
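
The rawmidi change above follows a common shape: do not hold the subsystem's private mutex across snd_register_device()/dev_register(), and re-take it only when an error path has to undo the earlier list insertion. A generic sketch of that shape, with invented names (registry_mutex, subsystem_register(), struct example):

        /* All names here are hypothetical; only the locking shape matters. */
        static int example_register(struct example *obj)
        {
                int err;

                mutex_lock(&registry_mutex);
                list_add_tail(&obj->list, &registry);
                mutex_unlock(&registry_mutex);  /* not held across the call below */

                err = subsystem_register(obj);  /* may sleep / take other locks */
                if (err < 0) {
                        mutex_lock(&registry_mutex);
                        list_del(&obj->list);   /* undo on failure */
                        mutex_unlock(&registry_mutex);
                }
                return err;
        }
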
index 9a6157e..fc144f4 100644 (file)
@@ -35,6 +35,9 @@
 #include <sound/initval.h>
 #include <linux/kmod.h>
 
+/* internal flags */
+#define SNDRV_TIMER_IFLG_PAUSED                0x00010000
+
 #if IS_ENABLED(CONFIG_SND_HRTIMER)
 #define DEFAULT_TIMER_LIMIT 4
 #else
@@ -294,8 +297,21 @@ int snd_timer_open(struct snd_timer_instance **ti,
                get_device(&timer->card->card_dev);
        timeri->slave_class = tid->dev_sclass;
        timeri->slave_id = slave_id;
-       if (list_empty(&timer->open_list_head) && timer->hw.open)
-               timer->hw.open(timer);
+
+       if (list_empty(&timer->open_list_head) && timer->hw.open) {
+               int err = timer->hw.open(timer);
+               if (err) {
+                       kfree(timeri->owner);
+                       kfree(timeri);
+
+                       if (timer->card)
+                               put_device(&timer->card->card_dev);
+                       module_put(timer->module);
+                       mutex_unlock(&register_mutex);
+                       return err;
+               }
+       }
+
        list_add_tail(&timeri->open_list, &timer->open_list_head);
        snd_timer_check_master(timeri);
        mutex_unlock(&register_mutex);
@@ -526,6 +542,10 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
                }
        }
        timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
+       if (stop)
+               timeri->flags &= ~SNDRV_TIMER_IFLG_PAUSED;
+       else
+               timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
        snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
                          SNDRV_TIMER_EVENT_CONTINUE);
  unlock:
@@ -587,6 +607,10 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
  */
 int snd_timer_continue(struct snd_timer_instance *timeri)
 {
+       /* timer can continue only after pause */
+       if (!(timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
+               return -EINVAL;
+
        if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
                return snd_timer_start_slave(timeri, false);
        else
@@ -813,6 +837,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
        timer->tmr_subdevice = tid->subdevice;
        if (id)
                strlcpy(timer->id, id, sizeof(timer->id));
+       timer->sticks = 1;
        INIT_LIST_HEAD(&timer->device_list);
        INIT_LIST_HEAD(&timer->open_list_head);
        INIT_LIST_HEAD(&timer->active_list_head);
@@ -1817,6 +1842,9 @@ static int snd_timer_user_continue(struct file *file)
        tu = file->private_data;
        if (!tu->timeri)
                return -EBADFD;
+       /* start the timer instead of continuing if it was not paused before */
+       if (!(tu->timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
+               return snd_timer_user_start(file);
        tu->timeri->lost = 0;
        return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0;
 }
@@ -1958,6 +1986,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                tu->qused--;
                spin_unlock_irq(&tu->qlock);
 
+               mutex_lock(&tu->ioctl_lock);
                if (tu->tread) {
                        if (copy_to_user(buffer, &tu->tqueue[qhead],
                                         sizeof(struct snd_timer_tread)))
@@ -1967,6 +1996,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                                         sizeof(struct snd_timer_read)))
                                err = -EFAULT;
                }
+               mutex_unlock(&tu->ioctl_lock);
 
                spin_lock_irq(&tu->qlock);
                if (err < 0)
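
Taken together, the timer.c hunks make pause/continue stateful: SNDRV_TIMER_IFLG_PAUSED is set by a pause, cleared by a stop, snd_timer_continue() refuses a continue that does not follow a pause, and the user-space entry point falls back to a plain start in that case. A toy state model (ordinary C, not the ALSA API) of just that rule:

        #include <errno.h>

        enum timer_state { T_STOPPED, T_RUNNING, T_PAUSED };

        static int model_start(enum timer_state *s)  { *s = T_RUNNING; return 0; }
        static int model_pause(enum timer_state *s)  { *s = T_PAUSED;  return 0; }
        static int model_stop(enum timer_state *s)   { *s = T_STOPPED; return 0; }

        /* kernel side: a continue that does not follow a pause is rejected */
        static int model_continue(enum timer_state *s)
        {
                if (*s != T_PAUSED)
                        return -EINVAL;
                *s = T_RUNNING;
                return 0;
        }

        /* user-facing side: continue-without-pause becomes a start instead */
        static int model_user_continue(enum timer_state *s)
        {
                if (*s != T_PAUSED)
                        return model_start(s);
                return model_continue(s);
        }
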
index 03ed352..d73c12b 100644 (file)
@@ -108,7 +108,6 @@ struct snd_efw {
        u8 *resp_buf;
        u8 *pull_ptr;
        u8 *push_ptr;
-       unsigned int resp_queues;
 };
 
 int snd_efw_transaction_cmd(struct fw_unit *unit,
index 33df865..2e1d9a2 100644 (file)
@@ -25,6 +25,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
 {
        unsigned int length, till_end, type;
        struct snd_efw_transaction *t;
+       u8 *pull_ptr;
        long count = 0;
 
        if (remained < sizeof(type) + sizeof(struct snd_efw_transaction))
@@ -38,8 +39,17 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
        buf += sizeof(type);
 
        /* write into buffer as many responses as possible */
-       while (efw->resp_queues > 0) {
-               t = (struct snd_efw_transaction *)(efw->pull_ptr);
+       spin_lock_irq(&efw->lock);
+
+       /*
+        * If another task reaches here while this task is accessing user
+        * space, it picks up the current buffer position and can read the
+        * same series of responses.
+        */
+       pull_ptr = efw->pull_ptr;
+
+       while (efw->push_ptr != pull_ptr) {
+               t = (struct snd_efw_transaction *)(pull_ptr);
                length = be32_to_cpu(t->length) * sizeof(__be32);
 
                /* confirm enough space for this response */
@@ -49,26 +59,39 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
                /* copy from ring buffer to user buffer */
                while (length > 0) {
                        till_end = snd_efw_resp_buf_size -
-                               (unsigned int)(efw->pull_ptr - efw->resp_buf);
+                               (unsigned int)(pull_ptr - efw->resp_buf);
                        till_end = min_t(unsigned int, length, till_end);
 
-                       if (copy_to_user(buf, efw->pull_ptr, till_end))
+                       spin_unlock_irq(&efw->lock);
+
+                       if (copy_to_user(buf, pull_ptr, till_end))
                                return -EFAULT;
 
-                       efw->pull_ptr += till_end;
-                       if (efw->pull_ptr >= efw->resp_buf +
-                                            snd_efw_resp_buf_size)
-                               efw->pull_ptr -= snd_efw_resp_buf_size;
+                       spin_lock_irq(&efw->lock);
+
+                       pull_ptr += till_end;
+                       if (pull_ptr >= efw->resp_buf + snd_efw_resp_buf_size)
+                               pull_ptr -= snd_efw_resp_buf_size;
 
                        length -= till_end;
                        buf += till_end;
                        count += till_end;
                        remained -= till_end;
                }
-
-               efw->resp_queues--;
        }
 
+       /*
+        * All tasks can read from the buffer almost simultaneously, but each
+        * task ends at a different position depending on the length of its
+        * buffer. For simplicity, the buffer position is committed by the
+        * latest task. A listening application is better off reading from a
+        * single thread; otherwise each task may see a different sequence of
+        * responses depending on its buffer length.
+        */
+       efw->pull_ptr = pull_ptr;
+
+       spin_unlock_irq(&efw->lock);
+
        return count;
 }
 
@@ -76,14 +99,17 @@ static long
 hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
                  loff_t *offset)
 {
-       union snd_firewire_event event;
+       union snd_firewire_event event = {
+               .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+       };
 
-       memset(&event, 0, sizeof(event));
+       spin_lock_irq(&efw->lock);
 
-       event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
        event.lock_status.status = (efw->dev_lock_count > 0);
        efw->dev_lock_changed = false;
 
+       spin_unlock_irq(&efw->lock);
+
        count = min_t(long, count, sizeof(event.lock_status));
 
        if (copy_to_user(buf, &event, count))
@@ -98,10 +124,15 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
 {
        struct snd_efw *efw = hwdep->private_data;
        DEFINE_WAIT(wait);
+       bool dev_lock_changed;
+       bool queued;
 
        spin_lock_irq(&efw->lock);
 
-       while ((!efw->dev_lock_changed) && (efw->resp_queues == 0)) {
+       dev_lock_changed = efw->dev_lock_changed;
+       queued = efw->push_ptr != efw->pull_ptr;
+
+       while (!dev_lock_changed && !queued) {
                prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
                spin_unlock_irq(&efw->lock);
                schedule();
@@ -109,15 +140,17 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
                if (signal_pending(current))
                        return -ERESTARTSYS;
                spin_lock_irq(&efw->lock);
+               dev_lock_changed = efw->dev_lock_changed;
+               queued = efw->push_ptr != efw->pull_ptr;
        }
 
-       if (efw->dev_lock_changed)
+       spin_unlock_irq(&efw->lock);
+
+       if (dev_lock_changed)
                count = hwdep_read_locked(efw, buf, count, offset);
-       else if (efw->resp_queues > 0)
+       else if (queued)
                count = hwdep_read_resp_buf(efw, buf, count, offset);
 
-       spin_unlock_irq(&efw->lock);
-
        return count;
 }
 
@@ -160,7 +193,7 @@ hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
        poll_wait(file, &efw->hwdep_wait, wait);
 
        spin_lock_irq(&efw->lock);
-       if (efw->dev_lock_changed || (efw->resp_queues > 0))
+       if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr)
                events = POLLIN | POLLRDNORM;
        else
                events = 0;
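
The pattern in the hwdep.c hunks above: take a private snapshot of the consumer pointer while holding the lock, drop the lock around copy_to_user() because it may fault and sleep, and publish the advanced pointer once at the end so that the latest task wins, as the in-code comment explains. A condensed sketch of that pattern with hypothetical names (struct resp_ring and ring_read() are not fireworks code):

        #include <linux/kernel.h>
        #include <linux/spinlock.h>
        #include <linux/uaccess.h>

        struct resp_ring {
                spinlock_t lock;
                u8 *buf;                /* backing storage, 'size' bytes     */
                u8 *push_ptr;           /* producer position (response side) */
                u8 *pull_ptr;           /* consumer position (hwdep read)    */
                size_t size;
        };

        static long ring_read(struct resp_ring *r, char __user *ubuf, size_t len)
        {
                u8 *pull;
                long copied = 0;

                spin_lock_irq(&r->lock);
                pull = r->pull_ptr;                     /* private snapshot */

                while (len > 0 && r->push_ptr != pull) {
                        size_t chunk;

                        if (r->push_ptr > pull)
                                chunk = r->push_ptr - pull;
                        else
                                chunk = (r->buf + r->size) - pull; /* up to wrap */
                        chunk = min_t(size_t, chunk, len);

                        spin_unlock_irq(&r->lock);      /* may fault and sleep */
                        if (copy_to_user(ubuf, pull, chunk))
                                return -EFAULT;
                        spin_lock_irq(&r->lock);

                        pull += chunk;
                        if (pull == r->buf + r->size)
                                pull = r->buf;          /* wrap around */
                        ubuf += chunk;
                        len -= chunk;
                        copied += chunk;
                }

                r->pull_ptr = pull;     /* publish the new position once */
                spin_unlock_irq(&r->lock);

                return copied;
        }
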
index 0639dcb..beb0a0f 100644 (file)
@@ -188,8 +188,8 @@ proc_read_queues_state(struct snd_info_entry *entry,
        else
                consumed = (unsigned int)(efw->push_ptr - efw->pull_ptr);
 
-       snd_iprintf(buffer, "%d %d/%d\n",
-                   efw->resp_queues, consumed, snd_efw_resp_buf_size);
+       snd_iprintf(buffer, "%d/%d\n",
+                   consumed, snd_efw_resp_buf_size);
 }
 
 static void
index f550808..36a08ba 100644 (file)
@@ -121,11 +121,11 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
        size_t capacity, till_end;
        struct snd_efw_transaction *t;
 
-       spin_lock_irq(&efw->lock);
-
        t = (struct snd_efw_transaction *)data;
        length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
 
+       spin_lock_irq(&efw->lock);
+
        if (efw->push_ptr < efw->pull_ptr)
                capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
        else
@@ -155,7 +155,6 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
        }
 
        /* for hwdep */
-       efw->resp_queues++;
        wake_up(&efw->hwdep_wait);
 
        *rcode = RCODE_COMPLETE;
index 131267c..106406c 100644 (file)
 
 #include "tascam.h"
 
-static long hwdep_read_locked(struct snd_tscm *tscm, char __user *buf,
-                             long count)
-{
-       union snd_firewire_event event;
-
-       memset(&event, 0, sizeof(event));
-
-       event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
-       event.lock_status.status = (tscm->dev_lock_count > 0);
-       tscm->dev_lock_changed = false;
-
-       count = min_t(long, count, sizeof(event.lock_status));
-
-       if (copy_to_user(buf, &event, count))
-               return -EFAULT;
-
-       return count;
-}
-
 static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
                       loff_t *offset)
 {
        struct snd_tscm *tscm = hwdep->private_data;
        DEFINE_WAIT(wait);
-       union snd_firewire_event event;
+       union snd_firewire_event event = {
+               .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+       };
 
        spin_lock_irq(&tscm->lock);
 
@@ -54,10 +37,16 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
                spin_lock_irq(&tscm->lock);
        }
 
-       memset(&event, 0, sizeof(event));
-       count = hwdep_read_locked(tscm, buf, count);
+       event.lock_status.status = (tscm->dev_lock_count > 0);
+       tscm->dev_lock_changed = false;
+
        spin_unlock_irq(&tscm->lock);
 
+       count = min_t(long, count, sizeof(event.lock_status));
+
+       if (copy_to_user(buf, &event, count))
+               return -EFAULT;
+
        return count;
 }
 
index 7100f05..575cefd 100644 (file)
@@ -4855,6 +4855,7 @@ enum {
        ALC221_FIXUP_HP_FRONT_MIC,
        ALC292_FIXUP_TPT460,
        ALC298_FIXUP_SPK_VOLUME,
+       ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -5516,6 +5517,15 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
        },
+       [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1b, 0x90170151 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5560,6 +5570,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
        SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
        SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
        SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
        SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
        SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
@@ -5895,6 +5906,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0x90a60170},
                {0x14, 0x90170120},
                {0x21, 0x02211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell Inspiron 5468", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60180},
+               {0x14, 0x90170120},
+               {0x21, 0x02211030}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS),
        SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
index 6cf1f35..152292e 100644 (file)
@@ -1141,6 +1141,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
        case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
        case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+       case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
        case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
        case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
        case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
index 0e8a1f7..f39c0e9 100644 (file)
@@ -348,7 +348,7 @@ int main(int argc, char **argv)
        int notrigger = 0;
        char *dummy;
 
-       struct iio_channel_info *channels;
+       struct iio_channel_info *channels = NULL;
 
        register_cleanup();
 
@@ -456,7 +456,7 @@ int main(int argc, char **argv)
 
        if (notrigger) {
                printf("trigger-less mode selected\n");
-       } if (trig_num >= 0) {
+       } else if (trig_num >= 0) {
                char *trig_dev_name;
                ret = asprintf(&trig_dev_name, "%strigger%d", iio_dir, trig_num);
                if (ret < 0) {